source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 73951

Last change on this file since 73951 was 73937, checked in by vboxsync on 2018-08-29

VMM/IEM, HM: Nested VMX: bugref:9180 Implemented VMWRITE instruction.

1/* $Id: IEMAll.cpp 73937 2018-08-29 06:12:35Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered, however this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
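/* Illustrative example (not part of the original source): what statements at the log
 * levels listed above typically look like, assuming the standard Log* macros from
 * VBox/log.h with LOG_GROUP set to LOG_GROUP_IEM.  The variables shown (uErr, uCs,
 * GCPtrRip, pszMnemonic, GCPtrMem, cbMem) are placeholders for whatever happens to be
 * in scope at the call site.
 *
 *     Log(("iemRaiseXcpt: #GP(0) uErr=%#x\n", uErr));                   level 1: faults etc.
 *     LogFlow(("IEMExecOne: cs:rip=%04x:%RGv\n", uCs, GCPtrRip));       flow: enter/exit info
 *     Log4(("decode - %04x:%RGv %s\n", uCs, GCPtrRip, pszMnemonic));    level 4: mnemonics w/ EIP
 *     Log8(("IEM WR %RGv LB %#x\n", GCPtrMem, cbMem));                  level 8: memory writes
 */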
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/assert.h>
121#include <iprt/string.h>
122#include <iprt/x86.h>
123
124
125/*********************************************************************************************************************************
126* Structures and Typedefs *
127*********************************************************************************************************************************/
128/** @typedef PFNIEMOP
129 * Pointer to an opcode decoder function.
130 */
131
132/** @def FNIEMOP_DEF
133 * Define an opcode decoder function.
134 *
135 * We're using macros for this so that adding and removing parameters as well as
136 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
137 *
138 * @param a_Name The function name.
139 */
140
141/** @typedef PFNIEMOPRM
142 * Pointer to an opcode decoder function with RM byte.
143 */
144
145/** @def FNIEMOPRM_DEF
146 * Define an opcode decoder function with RM byte.
147 *
148 * We're using macros for this so that adding and removing parameters as well as
149 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
150 *
151 * @param a_Name The function name.
152 */
153
154#if defined(__GNUC__) && defined(RT_ARCH_X86)
155typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
157# define FNIEMOP_DEF(a_Name) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
159# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
161# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
163
164#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
165typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
167# define FNIEMOP_DEF(a_Name) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
171# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
173
174#elif defined(__GNUC__)
175typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
176typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
177# define FNIEMOP_DEF(a_Name) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
179# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
181# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
183
184#else
185typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
186typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
187# define FNIEMOP_DEF(a_Name) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
191# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
193
194#endif
195#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
196
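/* Illustrative sketch (not from the original source) of how these macros are meant to
 * be used.  The function name iemOp_example is made up for the example; real decoder
 * functions have the same shape and are invoked through FNIEMOP_CALL (defined further
 * down) via tables such as g_apfnOneByteMap:
 *
 *     FNIEMOP_DEF(iemOp_example)      // expands to the calling-convention specific
 *     {                               // signature selected by the #if chain above
 *         ...decode operands and dispatch the instruction...
 *         return VINF_SUCCESS;
 *     }
 *
 *     ...
 *     return FNIEMOP_CALL(g_apfnOneByteMap[b]);   // b = the opcode byte just fetched
 */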
197
198/**
199 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
200 */
201typedef union IEMSELDESC
202{
203 /** The legacy view. */
204 X86DESC Legacy;
205 /** The long mode view. */
206 X86DESC64 Long;
207} IEMSELDESC;
208/** Pointer to a selector descriptor table entry. */
209typedef IEMSELDESC *PIEMSELDESC;
210
211/**
212 * CPU exception classes.
213 */
214typedef enum IEMXCPTCLASS
215{
216 IEMXCPTCLASS_BENIGN,
217 IEMXCPTCLASS_CONTRIBUTORY,
218 IEMXCPTCLASS_PAGE_FAULT,
219 IEMXCPTCLASS_DOUBLE_FAULT
220} IEMXCPTCLASS;
221
222
223/*********************************************************************************************************************************
224* Defined Constants And Macros *
225*********************************************************************************************************************************/
226/** @def IEM_WITH_SETJMP
227 * Enables alternative status code handling using setjmps.
228 *
229 * This adds a bit of expense via the setjmp() call since it saves all the
230 * non-volatile registers. However, it eliminates return code checks and allows
231 * for more optimal return value passing (return regs instead of stack buffer).
232 */
233#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
234# define IEM_WITH_SETJMP
235#endif
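/* Rough sketch (illustrative, not from the original source) of what the define changes
 * for callers of the memory fetch helpers.  The Jmp-suffixed raisers declared further
 * down (e.g. iemRaisePageFaultJmp) do not return, so the setjmp-mode fetch variant
 * (shown here as iemMemFetchDataU32Jmp; the exact helper name is an assumption) can
 * hand the value back directly instead of going through a status code:
 *
 *     // Status-code style (IEM_WITH_SETJMP not defined):
 *     uint32_t     u32Value;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, iSegReg, GCPtrMem);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *
 *     // setjmp style (IEM_WITH_SETJMP defined): a failure longjmps back to the
 *     // setjmp frame set up around the instruction, so no check is needed here.
 *     uint32_t u32Value = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
 */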
236
237/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
238 * due to GCC lacking knowledge about the value range of a switch. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
240
241/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
242#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
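/* Typical use (illustrative): the macro supplies the 'default:' label itself, so it
 * simply closes an otherwise exhaustive switch:
 *
 *     switch (pVCpu->iem.s.enmEffOpSize)
 *     {
 *         case IEMMODE_16BIT: cbValue = 2; break;
 *         case IEMMODE_32BIT: cbValue = 4; break;
 *         case IEMMODE_64BIT: cbValue = 8; break;
 *         IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *     }
 */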
243
244/**
245 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
246 * occasion.
247 */
248#ifdef LOG_ENABLED
249# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
250 do { \
251 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
252 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
253 } while (0)
254#else
255# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
257#endif
258
259/**
260 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
261 * occasion using the supplied logger statement.
262 *
263 * @param a_LoggerArgs What to log on failure.
264 */
265#ifdef LOG_ENABLED
266# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
267 do { \
268 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
269 /*LogFunc(a_LoggerArgs);*/ \
270 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
271 } while (0)
272#else
273# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
275#endif
276
277/**
278 * Call an opcode decoder function.
279 *
280 * We're using macros for this so that adding and removing parameters can be
281 * done as we please. See FNIEMOP_DEF.
282 */
283#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
284
285/**
286 * Call a common opcode decoder function taking one extra argument.
287 *
288 * We're using macros for this so that adding and removing parameters can be
289 * done as we please. See FNIEMOP_DEF_1.
290 */
291#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
292
293/**
294 * Call a common opcode decoder function taking two extra arguments.
295 *
296 * We're using macros for this so that adding and removing parameters can be
297 * done as we please. See FNIEMOP_DEF_2.
298 */
299#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
300
301/**
302 * Check if we're currently executing in real or virtual 8086 mode.
303 *
304 * @returns @c true if it is, @c false if not.
305 * @param a_pVCpu The IEM state of the current CPU.
306 */
307#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
308
309/**
310 * Check if we're currently executing in virtual 8086 mode.
311 *
312 * @returns @c true if it is, @c false if not.
313 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
314 */
315#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
316
317/**
318 * Check if we're currently executing in long mode.
319 *
320 * @returns @c true if it is, @c false if not.
321 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
322 */
323#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
324
325/**
326 * Check if we're currently executing in a 64-bit code segment.
327 *
328 * @returns @c true if it is, @c false if not.
329 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
330 */
331#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
332
333/**
334 * Check if we're currently executing in real mode.
335 *
336 * @returns @c true if it is, @c false if not.
337 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
338 */
339#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
340
341/**
342 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
343 * @returns PCCPUMFEATURES
344 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
345 */
346#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
347
348/**
349 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
350 * @returns PCCPUMFEATURES
351 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
352 */
353#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
354
355/**
356 * Evaluates to true if we're presenting an Intel CPU to the guest.
357 */
358#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
359
360/**
361 * Evaluates to true if we're presenting an AMD CPU to the guest.
362 */
363#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
364
365/**
366 * Check if the address is canonical.
367 */
368#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
369
370/**
371 * Gets the effective VEX.VVVV value.
372 *
373 * The 4th bit is ignored when not executing 64-bit code.
374 * @returns effective V-register value.
375 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
376 */
377#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
378 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
379
380/** @def IEM_USE_UNALIGNED_DATA_ACCESS
381 * Use unaligned accesses instead of elaborate byte assembly. */
382#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
383# define IEM_USE_UNALIGNED_DATA_ACCESS
384#endif
385
386#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
387/**
388 * Check the common VMX instruction preconditions.
389 */
390#define IEM_VMX_INSTR_COMMON_CHECKS(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
391 do { \
392 if (!IEM_IS_VMX_ENABLED(a_pVCpu)) \
393 { \
394 Log((a_szInstr ": CR4.VMXE not enabled -> #UD\n")); \
395 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = a_InsDiagPrefix##_Vmxe; \
396 return iemRaiseUndefinedOpcode(a_pVCpu); \
397 } \
398 if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
399 { \
400 Log((a_szInstr ": Real or v8086 mode -> #UD\n")); \
401 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = a_InsDiagPrefix##_RealOrV86Mode; \
402 return iemRaiseUndefinedOpcode(a_pVCpu); \
403 } \
404 if (IEM_IS_LONG_MODE(a_pVCpu) && !IEM_IS_64BIT_CODE(a_pVCpu)) \
405 { \
406 Log((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
407 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = a_InsDiagPrefix##_LongModeCS; \
408 return iemRaiseUndefinedOpcode(a_pVCpu); \
409 } \
410 } while (0)
411
412/**
413 * Check if VMX is enabled.
414 */
415# define IEM_IS_VMX_ENABLED(a_pVCpu) (CPUMIsGuestVmxEnabled(IEM_GET_CTX(a_pVCpu)))
416
417/**
418 * Check if the guest has entered VMX root operation.
419 */
420# define IEM_IS_VMX_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
421
422/**
423 * Check if the guest has entered VMX non-root operation.
424 */
425# define IEM_IS_VMX_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
426
427#else
428# define IEM_VMX_INSTR_COMMON_CHECKS(a_pVCpu, a_szInstr, a_InsDiagPrefix) do { } while (0)
429# define IEM_IS_VMX_ENABLED(a_pVCpu) (false)
430# define IEM_IS_VMX_ROOT_MODE(a_pVCpu) (false)
431# define IEM_IS_VMX_NON_ROOT_MODE(a_pVCpu) (false)
432
433#endif
434
435#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
436/**
437 * Check the common SVM instruction preconditions.
438 */
439# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
440 do { \
441 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
442 { \
443 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
444 return iemRaiseUndefinedOpcode(a_pVCpu); \
445 } \
446 if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
447 { \
448 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
449 return iemRaiseUndefinedOpcode(a_pVCpu); \
450 } \
451 if ((a_pVCpu)->iem.s.uCpl != 0) \
452 { \
453 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
454 return iemRaiseGeneralProtectionFault0(a_pVCpu); \
455 } \
456 } while (0)
457
458/**
459 * Updates the NextRIP (NRI) field in the nested-guest VMCB.
460 */
461# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
462 do { \
463 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
464 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
465 } while (0)
466
467/**
468 * Check if SVM is enabled.
469 */
470# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
471
472/**
473 * Check if an SVM control/instruction intercept is set.
474 */
475# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
476
477/**
478 * Check if an SVM read CRx intercept is set.
479 */
480# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
481
482/**
483 * Check if an SVM write CRx intercept is set.
484 */
485# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
486
487/**
488 * Check if an SVM read DRx intercept is set.
489 */
490# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
491
492/**
493 * Check if an SVM write DRx intercept is set.
494 */
495# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
496
497/**
498 * Check if an SVM exception intercept is set.
499 */
500# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
501
502/**
503 * Get the SVM pause-filter count.
504 */
505# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (CPUMGetGuestSvmPauseFilterCount(a_pVCpu, IEM_GET_CTX(a_pVCpu)))
506
507/**
508 * Invokes the SVM \#VMEXIT handler for the nested-guest.
509 */
510# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
511 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
512
513/**
514 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
515 * corresponding decode assist information.
516 */
517# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
518 do \
519 { \
520 uint64_t uExitInfo1; \
521 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
522 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
523 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
524 else \
525 uExitInfo1 = 0; \
526 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
527 } while (0)
528
529#else
530# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
531# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
532# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
533# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
534# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
535# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
536# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
537# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
538# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
539# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (0)
540# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
541# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
542
543#endif
544
545
546/*********************************************************************************************************************************
547* Global Variables *
548*********************************************************************************************************************************/
549extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
550
551
552/** Function table for the ADD instruction. */
553IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
554{
555 iemAImpl_add_u8, iemAImpl_add_u8_locked,
556 iemAImpl_add_u16, iemAImpl_add_u16_locked,
557 iemAImpl_add_u32, iemAImpl_add_u32_locked,
558 iemAImpl_add_u64, iemAImpl_add_u64_locked
559};
560
561/** Function table for the ADC instruction. */
562IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
563{
564 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
565 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
566 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
567 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
568};
569
570/** Function table for the SUB instruction. */
571IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
572{
573 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
574 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
575 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
576 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
577};
578
579/** Function table for the SBB instruction. */
580IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
581{
582 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
583 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
584 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
585 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
586};
587
588/** Function table for the OR instruction. */
589IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
590{
591 iemAImpl_or_u8, iemAImpl_or_u8_locked,
592 iemAImpl_or_u16, iemAImpl_or_u16_locked,
593 iemAImpl_or_u32, iemAImpl_or_u32_locked,
594 iemAImpl_or_u64, iemAImpl_or_u64_locked
595};
596
597/** Function table for the XOR instruction. */
598IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
599{
600 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
601 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
602 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
603 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
604};
605
606/** Function table for the AND instruction. */
607IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
608{
609 iemAImpl_and_u8, iemAImpl_and_u8_locked,
610 iemAImpl_and_u16, iemAImpl_and_u16_locked,
611 iemAImpl_and_u32, iemAImpl_and_u32_locked,
612 iemAImpl_and_u64, iemAImpl_and_u64_locked
613};
614
615/** Function table for the CMP instruction.
616 * @remarks Making operand order ASSUMPTIONS.
617 */
618IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
619{
620 iemAImpl_cmp_u8, NULL,
621 iemAImpl_cmp_u16, NULL,
622 iemAImpl_cmp_u32, NULL,
623 iemAImpl_cmp_u64, NULL
624};
625
626/** Function table for the TEST instruction.
627 * @remarks Making operand order ASSUMPTIONS.
628 */
629IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
630{
631 iemAImpl_test_u8, NULL,
632 iemAImpl_test_u16, NULL,
633 iemAImpl_test_u32, NULL,
634 iemAImpl_test_u64, NULL
635};
636
637/** Function table for the BT instruction. */
638IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
639{
640 NULL, NULL,
641 iemAImpl_bt_u16, NULL,
642 iemAImpl_bt_u32, NULL,
643 iemAImpl_bt_u64, NULL
644};
645
646/** Function table for the BTC instruction. */
647IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
648{
649 NULL, NULL,
650 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
651 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
652 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
653};
654
655/** Function table for the BTR instruction. */
656IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
657{
658 NULL, NULL,
659 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
660 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
661 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
662};
663
664/** Function table for the BTS instruction. */
665IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
666{
667 NULL, NULL,
668 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
669 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
670 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
671};
672
673/** Function table for the BSF instruction. */
674IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
675{
676 NULL, NULL,
677 iemAImpl_bsf_u16, NULL,
678 iemAImpl_bsf_u32, NULL,
679 iemAImpl_bsf_u64, NULL
680};
681
682/** Function table for the BSR instruction. */
683IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
684{
685 NULL, NULL,
686 iemAImpl_bsr_u16, NULL,
687 iemAImpl_bsr_u32, NULL,
688 iemAImpl_bsr_u64, NULL
689};
690
691/** Function table for the IMUL instruction. */
692IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
693{
694 NULL, NULL,
695 iemAImpl_imul_two_u16, NULL,
696 iemAImpl_imul_two_u32, NULL,
697 iemAImpl_imul_two_u64, NULL
698};
699
700/** Group 1 /r lookup table. */
701IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
702{
703 &g_iemAImpl_add,
704 &g_iemAImpl_or,
705 &g_iemAImpl_adc,
706 &g_iemAImpl_sbb,
707 &g_iemAImpl_and,
708 &g_iemAImpl_sub,
709 &g_iemAImpl_xor,
710 &g_iemAImpl_cmp
711};
712
713/** Function table for the INC instruction. */
714IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
715{
716 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
717 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
718 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
719 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
720};
721
722/** Function table for the DEC instruction. */
723IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
724{
725 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
726 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
727 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
728 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
729};
730
731/** Function table for the NEG instruction. */
732IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
733{
734 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
735 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
736 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
737 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
738};
739
740/** Function table for the NOT instruction. */
741IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
742{
743 iemAImpl_not_u8, iemAImpl_not_u8_locked,
744 iemAImpl_not_u16, iemAImpl_not_u16_locked,
745 iemAImpl_not_u32, iemAImpl_not_u32_locked,
746 iemAImpl_not_u64, iemAImpl_not_u64_locked
747};
748
749
750/** Function table for the ROL instruction. */
751IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
752{
753 iemAImpl_rol_u8,
754 iemAImpl_rol_u16,
755 iemAImpl_rol_u32,
756 iemAImpl_rol_u64
757};
758
759/** Function table for the ROR instruction. */
760IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
761{
762 iemAImpl_ror_u8,
763 iemAImpl_ror_u16,
764 iemAImpl_ror_u32,
765 iemAImpl_ror_u64
766};
767
768/** Function table for the RCL instruction. */
769IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
770{
771 iemAImpl_rcl_u8,
772 iemAImpl_rcl_u16,
773 iemAImpl_rcl_u32,
774 iemAImpl_rcl_u64
775};
776
777/** Function table for the RCR instruction. */
778IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
779{
780 iemAImpl_rcr_u8,
781 iemAImpl_rcr_u16,
782 iemAImpl_rcr_u32,
783 iemAImpl_rcr_u64
784};
785
786/** Function table for the SHL instruction. */
787IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
788{
789 iemAImpl_shl_u8,
790 iemAImpl_shl_u16,
791 iemAImpl_shl_u32,
792 iemAImpl_shl_u64
793};
794
795/** Function table for the SHR instruction. */
796IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
797{
798 iemAImpl_shr_u8,
799 iemAImpl_shr_u16,
800 iemAImpl_shr_u32,
801 iemAImpl_shr_u64
802};
803
804/** Function table for the SAR instruction. */
805IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
806{
807 iemAImpl_sar_u8,
808 iemAImpl_sar_u16,
809 iemAImpl_sar_u32,
810 iemAImpl_sar_u64
811};
812
813
814/** Function table for the MUL instruction. */
815IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
816{
817 iemAImpl_mul_u8,
818 iemAImpl_mul_u16,
819 iemAImpl_mul_u32,
820 iemAImpl_mul_u64
821};
822
823/** Function table for the IMUL instruction working implicitly on rAX. */
824IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
825{
826 iemAImpl_imul_u8,
827 iemAImpl_imul_u16,
828 iemAImpl_imul_u32,
829 iemAImpl_imul_u64
830};
831
832/** Function table for the DIV instruction. */
833IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
834{
835 iemAImpl_div_u8,
836 iemAImpl_div_u16,
837 iemAImpl_div_u32,
838 iemAImpl_div_u64
839};
840
841/** Function table for the IDIV instruction. */
842IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
843{
844 iemAImpl_idiv_u8,
845 iemAImpl_idiv_u16,
846 iemAImpl_idiv_u32,
847 iemAImpl_idiv_u64
848};
849
850/** Function table for the SHLD instruction */
851IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
852{
853 iemAImpl_shld_u16,
854 iemAImpl_shld_u32,
855 iemAImpl_shld_u64,
856};
857
858/** Function table for the SHRD instruction */
859IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
860{
861 iemAImpl_shrd_u16,
862 iemAImpl_shrd_u32,
863 iemAImpl_shrd_u64,
864};
865
866
867/** Function table for the PUNPCKLBW instruction */
868IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
869/** Function table for the PUNPCKLWD instruction */
870IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
871/** Function table for the PUNPCKLDQ instruction */
872IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
873/** Function table for the PUNPCKLQDQ instruction */
874IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
875
876/** Function table for the PUNPCKHBW instruction */
877IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
878/** Function table for the PUNPCKHWD instruction */
879IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
880/** Function table for the PUNPCKHDQ instruction */
881IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
882/** Function table for the PUNPCKHQDQ instruction */
883IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
884
885/** Function table for the PXOR instruction */
886IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
887/** Function table for the PCMPEQB instruction */
888IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
889/** Function table for the PCMPEQW instruction */
890IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
891/** Function table for the PCMPEQD instruction */
892IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
893
894
895#if defined(IEM_LOG_MEMORY_WRITES)
896/** What IEM just wrote. */
897uint8_t g_abIemWrote[256];
898/** How much IEM just wrote. */
899size_t g_cbIemWrote;
900#endif
901
902
903/*********************************************************************************************************************************
904* Internal Functions *
905*********************************************************************************************************************************/
906IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
907IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
908IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
909IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
910/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
911IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
912IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
913IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
914IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
915IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
916IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
917IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
918IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
919IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
920IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
921IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
922IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
923#ifdef IEM_WITH_SETJMP
924DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
925DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
926DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
927DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
928DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
929#endif
930
931IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
932IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
933IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
934IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
935IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
936IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
937IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
938IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
939IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
940IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
941IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
942IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
943IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
944IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
945IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
946IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
947IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
948
949#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
950IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
951IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
952#endif
953
954#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
955IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmxon, PCVMXEXITINSTRINFO pExitInstrInfo,
956 RTGCPTR GCPtrDisp);
957#endif
958
959/**
960 * Sets the pass up status.
961 *
962 * @returns VINF_SUCCESS.
963 * @param pVCpu The cross context virtual CPU structure of the
964 * calling thread.
965 * @param rcPassUp The pass up status. Must be informational.
966 * VINF_SUCCESS is not allowed.
967 */
968IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
969{
970 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
971
972 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
973 if (rcOldPassUp == VINF_SUCCESS)
974 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
975 /* If both are EM scheduling codes, use EM priority rules. */
976 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
977 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
978 {
979 if (rcPassUp < rcOldPassUp)
980 {
981 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
982 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
983 }
984 else
985 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
986 }
987 /* Override EM scheduling with specific status code. */
988 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
989 {
990 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
991 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
992 }
993 /* Don't override specific status code, first come first served. */
994 else
995 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
996 return VINF_SUCCESS;
997}
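/* Example of the priority rules above (illustrative, assuming the usual ordering of the
 * VINF_EM_XXX codes in err.h where more urgent statuses have lower values): if the
 * stored pass-up status is VINF_EM_RESCHEDULE and a later operation yields
 * VINF_EM_RESET, the numerically lower VINF_EM_RESET replaces it; in the reverse order
 * the stored VINF_EM_RESET is kept. */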
998
999
1000/**
1001 * Calculates the CPU mode.
1002 *
1003 * This is mainly for updating IEMCPU::enmCpuMode.
1004 *
1005 * @returns CPU mode.
1006 * @param pVCpu The cross context virtual CPU structure of the
1007 * calling thread.
1008 */
1009DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1010{
1011 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1012 return IEMMODE_64BIT;
1013 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1014 return IEMMODE_32BIT;
1015 return IEMMODE_16BIT;
1016}
1017
1018
1019/**
1020 * Initializes the execution state.
1021 *
1022 * @param pVCpu The cross context virtual CPU structure of the
1023 * calling thread.
1024 * @param fBypassHandlers Whether to bypass access handlers.
1025 *
1026 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1027 * side-effects in strict builds.
1028 */
1029DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1030{
1031 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1032 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1033
1034#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1035 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1036 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1037 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1038 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1039 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1040 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1041 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1042 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1043#endif
1044
1045#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1046 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1047#endif
1048 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1049 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1050#ifdef VBOX_STRICT
1051 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1052 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1053 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1054 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1055 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1056 pVCpu->iem.s.uRexReg = 127;
1057 pVCpu->iem.s.uRexB = 127;
1058 pVCpu->iem.s.offModRm = 127;
1059 pVCpu->iem.s.uRexIndex = 127;
1060 pVCpu->iem.s.iEffSeg = 127;
1061 pVCpu->iem.s.idxPrefix = 127;
1062 pVCpu->iem.s.uVex3rdReg = 127;
1063 pVCpu->iem.s.uVexLength = 127;
1064 pVCpu->iem.s.fEvexStuff = 127;
1065 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1066# ifdef IEM_WITH_CODE_TLB
1067 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1068 pVCpu->iem.s.pbInstrBuf = NULL;
1069 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1070 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1071 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1072 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1073# else
1074 pVCpu->iem.s.offOpcode = 127;
1075 pVCpu->iem.s.cbOpcode = 127;
1076# endif
1077#endif
1078
1079 pVCpu->iem.s.cActiveMappings = 0;
1080 pVCpu->iem.s.iNextMapping = 0;
1081 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1082 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1083#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1084 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1085 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1086 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1087 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1088 if (!pVCpu->iem.s.fInPatchCode)
1089 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1090#endif
1091}
1092
1093#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1094/**
1095 * Performs a minimal reinitialization of the execution state.
1096 *
1097 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1098 * 'world-switch' type operations on the CPU. Currently only nested
1099 * hardware-virtualization uses it.
1100 *
1101 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1102 */
1103IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1104{
1105 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1106 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1107
1108 pVCpu->iem.s.uCpl = uCpl;
1109 pVCpu->iem.s.enmCpuMode = enmMode;
1110 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1111 pVCpu->iem.s.enmEffAddrMode = enmMode;
1112 if (enmMode != IEMMODE_64BIT)
1113 {
1114 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1115 pVCpu->iem.s.enmEffOpSize = enmMode;
1116 }
1117 else
1118 {
1119 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1120 pVCpu->iem.s.enmEffOpSize = enmMode;
1121 }
1122 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1123#ifndef IEM_WITH_CODE_TLB
1124 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1125 pVCpu->iem.s.offOpcode = 0;
1126 pVCpu->iem.s.cbOpcode = 0;
1127#endif
1128 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1129}
1130#endif
1131
1132/**
1133 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1134 *
1135 * @param pVCpu The cross context virtual CPU structure of the
1136 * calling thread.
1137 */
1138DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1139{
1140 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1141#ifdef VBOX_STRICT
1142# ifdef IEM_WITH_CODE_TLB
1143 NOREF(pVCpu);
1144# else
1145 pVCpu->iem.s.cbOpcode = 0;
1146# endif
1147#else
1148 NOREF(pVCpu);
1149#endif
1150}
1151
1152
1153/**
1154 * Initializes the decoder state.
1155 *
1156 * iemReInitDecoder is mostly a copy of this function.
1157 *
1158 * @param pVCpu The cross context virtual CPU structure of the
1159 * calling thread.
1160 * @param fBypassHandlers Whether to bypass access handlers.
1161 */
1162DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1163{
1164 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1165 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1166
1167#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1168 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1169 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1170 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1171 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1172 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1173 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1174 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1175 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1176#endif
1177
1178#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1179 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1180#endif
1181 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1182 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1183 pVCpu->iem.s.enmCpuMode = enmMode;
1184 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1185 pVCpu->iem.s.enmEffAddrMode = enmMode;
1186 if (enmMode != IEMMODE_64BIT)
1187 {
1188 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1189 pVCpu->iem.s.enmEffOpSize = enmMode;
1190 }
1191 else
1192 {
1193 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1194 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1195 }
1196 pVCpu->iem.s.fPrefixes = 0;
1197 pVCpu->iem.s.uRexReg = 0;
1198 pVCpu->iem.s.uRexB = 0;
1199 pVCpu->iem.s.uRexIndex = 0;
1200 pVCpu->iem.s.idxPrefix = 0;
1201 pVCpu->iem.s.uVex3rdReg = 0;
1202 pVCpu->iem.s.uVexLength = 0;
1203 pVCpu->iem.s.fEvexStuff = 0;
1204 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1205#ifdef IEM_WITH_CODE_TLB
1206 pVCpu->iem.s.pbInstrBuf = NULL;
1207 pVCpu->iem.s.offInstrNextByte = 0;
1208 pVCpu->iem.s.offCurInstrStart = 0;
1209# ifdef VBOX_STRICT
1210 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1211 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1212 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1213# endif
1214#else
1215 pVCpu->iem.s.offOpcode = 0;
1216 pVCpu->iem.s.cbOpcode = 0;
1217#endif
1218 pVCpu->iem.s.offModRm = 0;
1219 pVCpu->iem.s.cActiveMappings = 0;
1220 pVCpu->iem.s.iNextMapping = 0;
1221 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1222 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1223#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1224 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1225 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1226 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1227 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1228 if (!pVCpu->iem.s.fInPatchCode)
1229 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1230#endif
1231
1232#ifdef DBGFTRACE_ENABLED
1233 switch (enmMode)
1234 {
1235 case IEMMODE_64BIT:
1236 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1237 break;
1238 case IEMMODE_32BIT:
1239 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1240 break;
1241 case IEMMODE_16BIT:
1242 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1243 break;
1244 }
1245#endif
1246}
1247
1248
1249/**
1250 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1251 *
1252 * This is mostly a copy of iemInitDecoder.
1253 *
1254 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1255 */
1256DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1257{
1258 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1259
1260#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1261 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1262 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1263 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1264 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1265 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1266 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1267 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1268 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1269#endif
1270
1271 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1272 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1273 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1274 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1275 pVCpu->iem.s.enmEffAddrMode = enmMode;
1276 if (enmMode != IEMMODE_64BIT)
1277 {
1278 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1279 pVCpu->iem.s.enmEffOpSize = enmMode;
1280 }
1281 else
1282 {
1283 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1284 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1285 }
1286 pVCpu->iem.s.fPrefixes = 0;
1287 pVCpu->iem.s.uRexReg = 0;
1288 pVCpu->iem.s.uRexB = 0;
1289 pVCpu->iem.s.uRexIndex = 0;
1290 pVCpu->iem.s.idxPrefix = 0;
1291 pVCpu->iem.s.uVex3rdReg = 0;
1292 pVCpu->iem.s.uVexLength = 0;
1293 pVCpu->iem.s.fEvexStuff = 0;
1294 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1295#ifdef IEM_WITH_CODE_TLB
1296 if (pVCpu->iem.s.pbInstrBuf)
1297 {
1298 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1299 - pVCpu->iem.s.uInstrBufPc;
1300 if (off < pVCpu->iem.s.cbInstrBufTotal)
1301 {
1302 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1303 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1304 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1305 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1306 else
1307 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1308 }
1309 else
1310 {
1311 pVCpu->iem.s.pbInstrBuf = NULL;
1312 pVCpu->iem.s.offInstrNextByte = 0;
1313 pVCpu->iem.s.offCurInstrStart = 0;
1314 pVCpu->iem.s.cbInstrBuf = 0;
1315 pVCpu->iem.s.cbInstrBufTotal = 0;
1316 }
1317 }
1318 else
1319 {
1320 pVCpu->iem.s.offInstrNextByte = 0;
1321 pVCpu->iem.s.offCurInstrStart = 0;
1322 pVCpu->iem.s.cbInstrBuf = 0;
1323 pVCpu->iem.s.cbInstrBufTotal = 0;
1324 }
1325#else
1326 pVCpu->iem.s.cbOpcode = 0;
1327 pVCpu->iem.s.offOpcode = 0;
1328#endif
1329 pVCpu->iem.s.offModRm = 0;
1330 Assert(pVCpu->iem.s.cActiveMappings == 0);
1331 pVCpu->iem.s.iNextMapping = 0;
1332 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1333 Assert(pVCpu->iem.s.fBypassHandlers == false);
1334#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1335 if (!pVCpu->iem.s.fInPatchCode)
1336 { /* likely */ }
1337 else
1338 {
1339 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1340 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1341 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1342 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1343 if (!pVCpu->iem.s.fInPatchCode)
1344 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1345 }
1346#endif
1347
1348#ifdef DBGFTRACE_ENABLED
1349 switch (enmMode)
1350 {
1351 case IEMMODE_64BIT:
1352 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1353 break;
1354 case IEMMODE_32BIT:
1355 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1356 break;
1357 case IEMMODE_16BIT:
1358 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1359 break;
1360 }
1361#endif
1362}
1363
1364
1365
1366/**
1367 * Prefetches opcodes the first time execution is started.
1368 *
1369 * @returns Strict VBox status code.
1370 * @param pVCpu The cross context virtual CPU structure of the
1371 * calling thread.
1372 * @param fBypassHandlers Whether to bypass access handlers.
1373 */
1374IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1375{
1376 iemInitDecoder(pVCpu, fBypassHandlers);
1377
1378#ifdef IEM_WITH_CODE_TLB
1379 /** @todo Do ITLB lookup here. */
1380
1381#else /* !IEM_WITH_CODE_TLB */
1382
1383 /*
1384 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1385 *
1386 * First translate CS:rIP to a physical address.
1387 */
1388 uint32_t cbToTryRead;
1389 RTGCPTR GCPtrPC;
1390 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1391 {
1392 cbToTryRead = PAGE_SIZE;
1393 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1394 if (IEM_IS_CANONICAL(GCPtrPC))
1395 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1396 else
1397 return iemRaiseGeneralProtectionFault0(pVCpu);
1398 }
1399 else
1400 {
1401 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1402 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1403 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1404 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1405 else
1406 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1407 if (cbToTryRead) { /* likely */ }
1408 else /* overflowed */
1409 {
1410 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1411 cbToTryRead = UINT32_MAX;
1412 }
1413 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1414 Assert(GCPtrPC <= UINT32_MAX);
1415 }
1416
1417# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1418 /* Allow interpretation of patch manager code blocks since they can for
1419 instance throw #PFs for perfectly good reasons. */
1420 if (pVCpu->iem.s.fInPatchCode)
1421 {
1422 size_t cbRead = 0;
1423 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1424 AssertRCReturn(rc, rc);
1425 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1426 return VINF_SUCCESS;
1427 }
1428# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1429
1430 RTGCPHYS GCPhys;
1431 uint64_t fFlags;
1432 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1433 if (RT_SUCCESS(rc)) { /* probable */ }
1434 else
1435 {
1436 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1437 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1438 }
1439 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1440 else
1441 {
1442 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1443 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1444 }
1445 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1446 else
1447 {
1448 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1449 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1450 }
1451 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1452 /** @todo Check reserved bits and such stuff. PGM is better at doing
1453 * that, so do it when implementing the guest virtual address
1454 * TLB... */
1455
1456 /*
1457 * Read the bytes at this address.
1458 */
1459 PVM pVM = pVCpu->CTX_SUFF(pVM);
1460# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1461 size_t cbActual;
1462 if ( PATMIsEnabled(pVM)
1463 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1464 {
1465 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1466 Assert(cbActual > 0);
1467 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1468 }
1469 else
1470# endif
1471 {
1472 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1473 if (cbToTryRead > cbLeftOnPage)
1474 cbToTryRead = cbLeftOnPage;
1475 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1476 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1477
1478 if (!pVCpu->iem.s.fBypassHandlers)
1479 {
1480 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1481 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1482 { /* likely */ }
1483 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1484 {
1485 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1486 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1487 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1488 }
1489 else
1490 {
1491 Log((RT_SUCCESS(rcStrict)
1492 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1493 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1494 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1495 return rcStrict;
1496 }
1497 }
1498 else
1499 {
1500 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1501 if (RT_SUCCESS(rc))
1502 { /* likely */ }
1503 else
1504 {
1505 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1506 GCPtrPC, GCPhys, cbToTryRead, rc));
1507 return rc;
1508 }
1509 }
1510 pVCpu->iem.s.cbOpcode = cbToTryRead;
1511 }
1512#endif /* !IEM_WITH_CODE_TLB */
1513 return VINF_SUCCESS;
1514}
1515
1516
1517/**
1518 * Invalidates the IEM TLBs.
1519 *
1520 * This is called internally as well as by PGM when moving GC mappings.
1521 *
1523 * @param pVCpu The cross context virtual CPU structure of the calling
1524 * thread.
1525 * @param fVmm Set when PGM calls us with a remapping.
1526 */
1527VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1528{
1529#ifdef IEM_WITH_CODE_TLB
1530 pVCpu->iem.s.cbInstrBufTotal = 0;
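    /* Bumping the revision invalidates every entry in one go: a lookup only hits
       when the entry's tag carries the current revision value.  Only when the
       revision counter wraps back to zero do the tags need explicit scrubbing. */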
1531 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1532 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1533 { /* very likely */ }
1534 else
1535 {
1536 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1537 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1538 while (i-- > 0)
1539 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1540 }
1541#endif
1542
1543#ifdef IEM_WITH_DATA_TLB
1544 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1545 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1546 { /* very likely */ }
1547 else
1548 {
1549 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1550 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1551 while (i-- > 0)
1552 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1553 }
1554#endif
1555 NOREF(pVCpu); NOREF(fVmm);
1556}
1557
1558
1559/**
1560 * Invalidates a page in the TLBs.
1561 *
1562 * @param pVCpu The cross context virtual CPU structure of the calling
1563 * thread.
1564 * @param GCPtr The address of the page to invalidate.
1565 */
1566VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1567{
1568#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1569 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1570 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1571 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1572 uintptr_t idx = (uint8_t)GCPtr;
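    /* The TLBs are direct mapped with 256 entries: the low 8 bits of the page
       number pick the entry, and the page number merged with the current
       revision forms the tag that must match for a hit. */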
1573
1574# ifdef IEM_WITH_CODE_TLB
1575 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1576 {
1577 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1578 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1579 pVCpu->iem.s.cbInstrBufTotal = 0;
1580 }
1581# endif
1582
1583# ifdef IEM_WITH_DATA_TLB
1584 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1585 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1586# endif
1587#else
1588 NOREF(pVCpu); NOREF(GCPtr);
1589#endif
1590}
1591
1592
1593/**
1594 * Invalidates the host physical aspects of the IEM TLBs.
1595 *
1596 * This is called internally as well as by PGM when moving GC mappings.
1597 *
1598 * @param pVCpu The cross context virtual CPU structure of the calling
1599 * thread.
1600 */
1601VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1602{
1603#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1604 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1605
1606# ifdef IEM_WITH_CODE_TLB
1607 pVCpu->iem.s.cbInstrBufTotal = 0;
1608# endif
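    /* The physical revision works like the virtual one: bumping it lazily
       invalidates the host-mapping bits of every entry, and only a wrap to zero
       forces an explicit scrub of the mapping pointers and flags. */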
1609 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1610 if (uTlbPhysRev != 0)
1611 {
1612 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1613 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1614 }
1615 else
1616 {
1617 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1618 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1619
1620 unsigned i;
1621# ifdef IEM_WITH_CODE_TLB
1622 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1623 while (i-- > 0)
1624 {
1625 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1626 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1627 }
1628# endif
1629# ifdef IEM_WITH_DATA_TLB
1630 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1631 while (i-- > 0)
1632 {
1633 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1634 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1635 }
1636# endif
1637 }
1638#else
1639 NOREF(pVCpu);
1640#endif
1641}
1642
1643
1644/**
1645 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1646 *
1647 * This is called internally as well as by PGM when moving GC mappings.
1648 *
1649 * @param pVM The cross context VM structure.
1650 *
1651 * @remarks Caller holds the PGM lock.
1652 */
1653VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1654{
1655 RT_NOREF_PV(pVM);
1656}
1657
1658#ifdef IEM_WITH_CODE_TLB
1659
1660/**
1661 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1662 * failure (longjmp).
1663 *
1664 * We end up here for a number of reasons:
1665 * - pbInstrBuf isn't yet initialized.
1666 * - Advancing beyond the buffer boundary (e.g. cross page).
1667 * - Advancing beyond the CS segment limit.
1668 * - Fetching from non-mappable page (e.g. MMIO).
1669 *
1670 * @param pVCpu The cross context virtual CPU structure of the
1671 * calling thread.
1672 * @param pvDst Where to return the bytes.
1673 * @param cbDst Number of bytes to read.
1674 *
1675 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1676 */
1677IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1678{
1679#ifdef IN_RING3
1680 for (;;)
1681 {
1682 Assert(cbDst <= 8);
1683 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1684
1685 /*
1686 * We might have a partial buffer match, deal with that first to make the
1687 * rest simpler. This is the first part of the cross page/buffer case.
1688 */
1689 if (pVCpu->iem.s.pbInstrBuf != NULL)
1690 {
1691 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1692 {
1693 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1694 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1695 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1696
1697 cbDst -= cbCopy;
1698 pvDst = (uint8_t *)pvDst + cbCopy;
1699 offBuf += cbCopy;
1700 pVCpu->iem.s.offInstrNextByte = offBuf; /* advance the cursor past the bytes we copied */
1701 }
1702 }
1703
1704 /*
1705 * Check segment limit, figuring how much we're allowed to access at this point.
1706 *
1707 * We will fault immediately if RIP is past the segment limit / in non-canonical
1708 * territory. If we do continue, there are one or more bytes to read before we
1709 * end up in trouble and we need to do that first before faulting.
1710 */
1711 RTGCPTR GCPtrFirst;
1712 uint32_t cbMaxRead;
1713 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1714 {
1715 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1716 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1717 { /* likely */ }
1718 else
1719 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1720 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1721 }
1722 else
1723 {
1724 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1725 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1726 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1727 { /* likely */ }
1728 else
1729 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1730 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1731 if (cbMaxRead != 0)
1732 { /* likely */ }
1733 else
1734 {
1735 /* Overflowed because address is 0 and limit is max. */
1736 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1737 cbMaxRead = X86_PAGE_SIZE;
1738 }
1739 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1740 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1741 if (cbMaxRead2 < cbMaxRead)
1742 cbMaxRead = cbMaxRead2;
1743 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1744 }
1745
1746 /*
1747 * Get the TLB entry for this piece of code.
1748 */
1749 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1750 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1751 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1752 if (pTlbe->uTag == uTag)
1753 {
1754 /* likely when executing lots of code, otherwise unlikely */
1755# ifdef VBOX_WITH_STATISTICS
1756 pVCpu->iem.s.CodeTlb.cTlbHits++;
1757# endif
1758 }
1759 else
1760 {
1761 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1762# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1763 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1764 {
1765 pTlbe->uTag = uTag;
1766 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1767 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1768 pTlbe->GCPhys = NIL_RTGCPHYS;
1769 pTlbe->pbMappingR3 = NULL;
1770 }
1771 else
1772# endif
1773 {
1774 RTGCPHYS GCPhys;
1775 uint64_t fFlags;
1776 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1777 if (RT_FAILURE(rc))
1778 {
1779 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1780 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1781 }
1782
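            /* The page-table bits are stored inverted (the NO_USER/NO_WRITE/NO_DIRTY
               flags are set when the corresponding PTE bit is clear), and the PAE NX
               bit is shifted all the way down so it lands in bit 0, which is what the
               AssertCompile below checks. */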
1783 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1784 pTlbe->uTag = uTag;
1785 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1786 pTlbe->GCPhys = GCPhys;
1787 pTlbe->pbMappingR3 = NULL;
1788 }
1789 }
1790
1791 /*
1792 * Check TLB page table level access flags.
1793 */
1794 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1795 {
1796 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1797 {
1798 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1799 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1800 }
1801 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1802 {
1803 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1804 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1805 }
1806 }
1807
1808# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1809 /*
1810 * Allow interpretation of patch manager code blocks since they can for
1811 * instance throw #PFs for perfectly good reasons.
1812 */
1813 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1814 { /* not unlikely */ }
1815 else
1816 {
1817 /** @todo Could optimize this a little in ring-3 if we liked. */
1818 size_t cbRead = 0;
1819 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1820 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1821 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1822 return;
1823 }
1824# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1825
1826 /*
1827 * Look up the physical page info if necessary.
1828 */
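        /* The low bits of fFlagsAndPhysRev hold the physical revision that was
           current when the mapping info was cached; if it no longer matches
           CodeTlb.uTlbPhysRev, the ring-3 mapping data is stale and is refreshed
           from PGM below. */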
1829 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1830 { /* not necessary */ }
1831 else
1832 {
1833 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1834 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1835 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1836 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1837 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1838 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1839 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1840 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1841 }
1842
1843# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1844 /*
1845 * Try do a direct read using the pbMappingR3 pointer.
1846 */
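        /* When the page has a ring-3 mapping and no read handlers get in the way,
           we decode straight out of that mapping.  The instruction buffer is capped
           so that at most 15 bytes, the architectural maximum instruction length,
           are available from the start of the current instruction. */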
1847 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1848 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1849 {
1850 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1851 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1852 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1853 {
1854 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1855 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1856 }
1857 else
1858 {
1859 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1860 Assert(cbInstr < cbMaxRead);
1861 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1862 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1863 }
1864 if (cbDst <= cbMaxRead)
1865 {
1866 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1867 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1868 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1869 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1870 return;
1871 }
1872 pVCpu->iem.s.pbInstrBuf = NULL;
1873
1874 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1875 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1876 }
1877 else
1878# endif
1879#if 0
1880 /*
1881 * If there is no special read handling, we can read a bit more and
1882 * put it in the prefetch buffer.
1883 */
1884 if ( cbDst < cbMaxRead
1885 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1886 {
1887 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1888 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1889 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1890 { /* likely */ }
1891 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1892 {
1893 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1894 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1895 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1896 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1897 }
1898 else
1899 {
1900 Log((RT_SUCCESS(rcStrict)
1901 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1902 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1903 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1904 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1905 }
1906 }
1907 /*
1908 * Special read handling, so only read exactly what's needed.
1909 * This is a highly unlikely scenario.
1910 */
1911 else
1912#endif
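    /* With the buffered-read path above disabled (#if 0), every fetch that cannot
       be served from a direct ring-3 mapping goes through PGMPhysRead, reading only
       as much as the decoder asked for (bounded by the page and segment limit). */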
1913 {
1914 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1915 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1916 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1917 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1918 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1919 { /* likely */ }
1920 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1921 {
1922 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1923 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1924 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1925 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1926 }
1927 else
1928 {
1929 Log((RT_SUCCESS(rcStrict)
1930 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1931 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1932 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1933 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1934 }
1935 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1936 if (cbToRead == cbDst)
1937 return;
1938 }
1939
1940 /*
1941 * More to read, loop.
1942 */
1943 cbDst -= cbMaxRead;
1944 pvDst = (uint8_t *)pvDst + cbMaxRead;
1945 }
1946#else
1947 RT_NOREF(pvDst, cbDst);
1948 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1949#endif
1950}
1951
1952#else /* !IEM_WITH_CODE_TLB */
1953
1954/**
1955 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1956 * exception if it fails.
1957 *
1958 * @returns Strict VBox status code.
1959 * @param pVCpu The cross context virtual CPU structure of the
1960 * calling thread.
1961 * @param cbMin The minimum number of bytes relative to offOpcode
1962 * that must be read.
1963 */
1964IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1965{
1966 /*
1967 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1968 *
1969 * First translate CS:rIP to a physical address.
1970 */
1971 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1972 uint32_t cbToTryRead;
1973 RTGCPTR GCPtrNext;
1974 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1975 {
1976 cbToTryRead = PAGE_SIZE;
1977 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
1978 if (!IEM_IS_CANONICAL(GCPtrNext))
1979 return iemRaiseGeneralProtectionFault0(pVCpu);
1980 }
1981 else
1982 {
1983 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1984 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1985 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1986 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1987 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1988 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1989 if (!cbToTryRead) /* overflowed */
1990 {
1991 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1992 cbToTryRead = UINT32_MAX;
1993 /** @todo check out wrapping around the code segment. */
1994 }
1995 if (cbToTryRead < cbMin - cbLeft)
1996 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1997 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1998 }
1999
2000 /* Only read up to the end of the page, and make sure we don't read more
2001 than the opcode buffer can hold. */
2002 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2003 if (cbToTryRead > cbLeftOnPage)
2004 cbToTryRead = cbLeftOnPage;
2005 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2006 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2007/** @todo r=bird: Convert assertion into undefined opcode exception? */
2008 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2009
2010# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2011 /* Allow interpretation of patch manager code blocks since they can for
2012 instance throw #PFs for perfectly good reasons. */
2013 if (pVCpu->iem.s.fInPatchCode)
2014 {
2015 size_t cbRead = 0;
2016 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2017 AssertRCReturn(rc, rc);
2018 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2019 return VINF_SUCCESS;
2020 }
2021# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2022
2023 RTGCPHYS GCPhys;
2024 uint64_t fFlags;
2025 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2026 if (RT_FAILURE(rc))
2027 {
2028 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2029 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2030 }
2031 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2032 {
2033 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2034 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2035 }
2036 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2037 {
2038 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2039 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2040 }
2041 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2042 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2043 /** @todo Check reserved bits and such stuff. PGM is better at doing
2044 * that, so do it when implementing the guest virtual address
2045 * TLB... */
2046
2047 /*
2048 * Read the bytes at this address.
2049 *
2050 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2051 * and since PATM should only patch the start of an instruction there
2052 * should be no need to check again here.
2053 */
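    /* PGMPhysRead honours access handlers (MMIO and the like); the bypass path
       below reads the backing page directly via PGMPhysSimpleReadGCPhys. */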
2054 if (!pVCpu->iem.s.fBypassHandlers)
2055 {
2056 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2057 cbToTryRead, PGMACCESSORIGIN_IEM);
2058 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2059 { /* likely */ }
2060 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2061 {
2062 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2063 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2064 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2065 }
2066 else
2067 {
2068 Log((RT_SUCCESS(rcStrict)
2069 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2070 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2071 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2072 return rcStrict;
2073 }
2074 }
2075 else
2076 {
2077 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2078 if (RT_SUCCESS(rc))
2079 { /* likely */ }
2080 else
2081 {
2082 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2083 return rc;
2084 }
2085 }
2086 pVCpu->iem.s.cbOpcode += cbToTryRead;
2087 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2088
2089 return VINF_SUCCESS;
2090}
2091
2092#endif /* !IEM_WITH_CODE_TLB */
2093#ifndef IEM_WITH_SETJMP
2094
2095/**
2096 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2097 *
2098 * @returns Strict VBox status code.
2099 * @param pVCpu The cross context virtual CPU structure of the
2100 * calling thread.
2101 * @param pb Where to return the opcode byte.
2102 */
2103DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2104{
2105 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2106 if (rcStrict == VINF_SUCCESS)
2107 {
2108 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2109 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2110 pVCpu->iem.s.offOpcode = offOpcode + 1;
2111 }
2112 else
2113 *pb = 0;
2114 return rcStrict;
2115}
2116
2117
2118/**
2119 * Fetches the next opcode byte.
2120 *
2121 * @returns Strict VBox status code.
2122 * @param pVCpu The cross context virtual CPU structure of the
2123 * calling thread.
2124 * @param pu8 Where to return the opcode byte.
2125 */
2126DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2127{
2128 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2129 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2130 {
2131 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2132 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2133 return VINF_SUCCESS;
2134 }
2135 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2136}
2137
2138#else /* IEM_WITH_SETJMP */
2139
2140/**
2141 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2142 *
2143 * @returns The opcode byte.
2144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2145 */
2146DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2147{
2148# ifdef IEM_WITH_CODE_TLB
2149 uint8_t u8;
2150 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2151 return u8;
2152# else
2153 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2154 if (rcStrict == VINF_SUCCESS)
2155 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2156 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2157# endif
2158}
2159
2160
2161/**
2162 * Fetches the next opcode byte, longjmp on error.
2163 *
2164 * @returns The opcode byte.
2165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2166 */
2167DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2168{
2169# ifdef IEM_WITH_CODE_TLB
2170 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2171 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2172 if (RT_LIKELY( pbBuf != NULL
2173 && offBuf < pVCpu->iem.s.cbInstrBuf))
2174 {
2175 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2176 return pbBuf[offBuf];
2177 }
2178# else
2179 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2180 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2181 {
2182 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2183 return pVCpu->iem.s.abOpcode[offOpcode];
2184 }
2185# endif
2186 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2187}
2188
2189#endif /* IEM_WITH_SETJMP */
2190
2191/**
2192 * Fetches the next opcode byte, returns automatically on failure.
2193 *
2194 * @param a_pu8 Where to return the opcode byte.
2195 * @remark Implicitly references pVCpu.
2196 */
2197#ifndef IEM_WITH_SETJMP
2198# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2199 do \
2200 { \
2201 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2202 if (rcStrict2 == VINF_SUCCESS) \
2203 { /* likely */ } \
2204 else \
2205 return rcStrict2; \
2206 } while (0)
2207#else
2208# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2209#endif /* IEM_WITH_SETJMP */
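/* Illustrative decoder usage (a sketch only; assumes a caller that returns
   VBOXSTRICTRC and has pVCpu in scope, as the opcode decoder functions do):
       uint8_t bOpcode;
       IEM_OPCODE_GET_NEXT_U8(&bOpcode);
   On a fetch failure the macro either returns the status (non-setjmp build) or
   longjmps (setjmp build); on success the opcode cursor has advanced one byte. */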
2210
2211
2212#ifndef IEM_WITH_SETJMP
2213/**
2214 * Fetches the next signed byte from the opcode stream.
2215 *
2216 * @returns Strict VBox status code.
2217 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2218 * @param pi8 Where to return the signed byte.
2219 */
2220DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2221{
2222 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2223}
2224#endif /* !IEM_WITH_SETJMP */
2225
2226
2227/**
2228 * Fetches the next signed byte from the opcode stream, returning automatically
2229 * on failure.
2230 *
2231 * @param a_pi8 Where to return the signed byte.
2232 * @remark Implicitly references pVCpu.
2233 */
2234#ifndef IEM_WITH_SETJMP
2235# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2236 do \
2237 { \
2238 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2239 if (rcStrict2 != VINF_SUCCESS) \
2240 return rcStrict2; \
2241 } while (0)
2242#else /* IEM_WITH_SETJMP */
2243# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2244
2245#endif /* IEM_WITH_SETJMP */
2246
2247#ifndef IEM_WITH_SETJMP
2248
2249/**
2250 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2251 *
2252 * @returns Strict VBox status code.
2253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2254 * @param pu16 Where to return the opcode word.
2255 */
2256DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2257{
2258 uint8_t u8;
2259 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2260 if (rcStrict == VINF_SUCCESS)
2261 *pu16 = (int8_t)u8;
2262 return rcStrict;
2263}
2264
2265
2266/**
2267 * Fetches the next signed byte from the opcode stream, extending it to
2268 * unsigned 16-bit.
2269 *
2270 * @returns Strict VBox status code.
2271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2272 * @param pu16 Where to return the unsigned word.
2273 */
2274DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2275{
2276 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2277 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2278 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2279
2280 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2281 pVCpu->iem.s.offOpcode = offOpcode + 1;
2282 return VINF_SUCCESS;
2283}
2284
2285#endif /* !IEM_WITH_SETJMP */
2286
2287/**
2288 * Fetches the next signed byte from the opcode stream and sign-extends it to
2289 * a word, returning automatically on failure.
2290 *
2291 * @param a_pu16 Where to return the word.
2292 * @remark Implicitly references pVCpu.
2293 */
2294#ifndef IEM_WITH_SETJMP
2295# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2296 do \
2297 { \
2298 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2299 if (rcStrict2 != VINF_SUCCESS) \
2300 return rcStrict2; \
2301 } while (0)
2302#else
2303# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2304#endif
2305
2306#ifndef IEM_WITH_SETJMP
2307
2308/**
2309 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2310 *
2311 * @returns Strict VBox status code.
2312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2313 * @param pu32 Where to return the opcode dword.
2314 */
2315DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2316{
2317 uint8_t u8;
2318 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2319 if (rcStrict == VINF_SUCCESS)
2320 *pu32 = (int8_t)u8;
2321 return rcStrict;
2322}
2323
2324
2325/**
2326 * Fetches the next signed byte from the opcode stream, extending it to
2327 * unsigned 32-bit.
2328 *
2329 * @returns Strict VBox status code.
2330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2331 * @param pu32 Where to return the unsigned dword.
2332 */
2333DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2334{
2335 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2336 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2337 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2338
2339 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2340 pVCpu->iem.s.offOpcode = offOpcode + 1;
2341 return VINF_SUCCESS;
2342}
2343
2344#endif /* !IEM_WITH_SETJMP */
2345
2346/**
2347 * Fetches the next signed byte from the opcode stream and sign-extends it to
2348 * a double word, returning automatically on failure.
2349 *
2350 * @param a_pu32 Where to return the double word.
2351 * @remark Implicitly references pVCpu.
2352 */
2353#ifndef IEM_WITH_SETJMP
2354# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2355 do \
2356 { \
2357 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2358 if (rcStrict2 != VINF_SUCCESS) \
2359 return rcStrict2; \
2360 } while (0)
2361#else
2362# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2363#endif
2364
2365#ifndef IEM_WITH_SETJMP
2366
2367/**
2368 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2369 *
2370 * @returns Strict VBox status code.
2371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2372 * @param pu64 Where to return the opcode qword.
2373 */
2374DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2375{
2376 uint8_t u8;
2377 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2378 if (rcStrict == VINF_SUCCESS)
2379 *pu64 = (int8_t)u8;
2380 return rcStrict;
2381}
2382
2383
2384/**
2385 * Fetches the next signed byte from the opcode stream, extending it to
2386 * unsigned 64-bit.
2387 *
2388 * @returns Strict VBox status code.
2389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2390 * @param pu64 Where to return the unsigned qword.
2391 */
2392DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2393{
2394 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2395 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2396 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2397
2398 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2399 pVCpu->iem.s.offOpcode = offOpcode + 1;
2400 return VINF_SUCCESS;
2401}
2402
2403#endif /* !IEM_WITH_SETJMP */
2404
2405
2406/**
2407 * Fetches the next signed byte from the opcode stream and sign-extends it to
2408 * a quad word, returning automatically on failure.
2409 *
2410 * @param a_pu64 Where to return the quad word.
2411 * @remark Implicitly references pVCpu.
2412 */
2413#ifndef IEM_WITH_SETJMP
2414# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2415 do \
2416 { \
2417 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2418 if (rcStrict2 != VINF_SUCCESS) \
2419 return rcStrict2; \
2420 } while (0)
2421#else
2422# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2423#endif
2424
2425
2426#ifndef IEM_WITH_SETJMP
2427/**
2428 * Fetches the next opcode byte, noting down its position as the ModR/M byte for VT-x exits.
2429 *
2430 * @returns Strict VBox status code.
2431 * @param pVCpu The cross context virtual CPU structure of the
2432 * calling thread.
2433 * @param pu8 Where to return the opcode byte.
2434 */
2435DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2436{
2437 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2438 pVCpu->iem.s.offModRm = offOpcode;
2439 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2440 {
2441 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2442 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2443 return VINF_SUCCESS;
2444 }
2445 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2446}
2447#else /* IEM_WITH_SETJMP */
2448/**
2449 * Fetches the next opcode byte, noting down its position as the ModR/M byte for VT-x exits; longjmp on error.
2450 *
2451 * @returns The opcode byte.
2452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2453 */
2454DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2455{
2456# ifdef IEM_WITH_CODE_TLB
2457 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2458 pVCpu->iem.s.offModRm = offBuf;
2459 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2460 if (RT_LIKELY( pbBuf != NULL
2461 && offBuf < pVCpu->iem.s.cbInstrBuf))
2462 {
2463 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2464 return pbBuf[offBuf];
2465 }
2466# else
2467 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2468 pVCpu->iem.s.offModRm = offOpcode;
2469 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2470 {
2471 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2472 return pVCpu->iem.s.abOpcode[offOpcode];
2473 }
2474# endif
2475 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2476}
2477#endif /* IEM_WITH_SETJMP */
2478
2479/**
2480 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2481 * on failure.
2482 *
2483 * Will note down the position of the ModR/M byte for VT-x exits.
2484 *
2485 * @param a_pbRm Where to return the RM opcode byte.
2486 * @remark Implicitly references pVCpu.
2487 */
2488#ifndef IEM_WITH_SETJMP
2489# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2490 do \
2491 { \
2492 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2493 if (rcStrict2 == VINF_SUCCESS) \
2494 { /* likely */ } \
2495 else \
2496 return rcStrict2; \
2497 } while (0)
2498#else
2499# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2500#endif /* IEM_WITH_SETJMP */
2501
2502
2503#ifndef IEM_WITH_SETJMP
2504
2505/**
2506 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2507 *
2508 * @returns Strict VBox status code.
2509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2510 * @param pu16 Where to return the opcode word.
2511 */
2512DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2513{
2514 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2515 if (rcStrict == VINF_SUCCESS)
2516 {
2517 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2518# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2519 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2520# else
2521 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2522# endif
2523 pVCpu->iem.s.offOpcode = offOpcode + 2;
2524 }
2525 else
2526 *pu16 = 0;
2527 return rcStrict;
2528}
2529
2530
2531/**
2532 * Fetches the next opcode word.
2533 *
2534 * @returns Strict VBox status code.
2535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2536 * @param pu16 Where to return the opcode word.
2537 */
2538DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2539{
2540 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2541 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2542 {
2543 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2544# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2545 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2546# else
2547 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2548# endif
2549 return VINF_SUCCESS;
2550 }
2551 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2552}
2553
2554#else /* IEM_WITH_SETJMP */
2555
2556/**
2557 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2558 *
2559 * @returns The opcode word.
2560 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2561 */
2562DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2563{
2564# ifdef IEM_WITH_CODE_TLB
2565 uint16_t u16;
2566 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2567 return u16;
2568# else
2569 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2570 if (rcStrict == VINF_SUCCESS)
2571 {
2572 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2573 pVCpu->iem.s.offOpcode += 2;
2574# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2575 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2576# else
2577 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2578# endif
2579 }
2580 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2581# endif
2582}
2583
2584
2585/**
2586 * Fetches the next opcode word, longjmp on error.
2587 *
2588 * @returns The opcode word.
2589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2590 */
2591DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2592{
2593# ifdef IEM_WITH_CODE_TLB
2594 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2595 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2596 if (RT_LIKELY( pbBuf != NULL
2597 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2598 {
2599 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2600# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2601 return *(uint16_t const *)&pbBuf[offBuf];
2602# else
2603 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2604# endif
2605 }
2606# else
2607 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2608 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2609 {
2610 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2611# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2612 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2613# else
2614 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2615# endif
2616 }
2617# endif
2618 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2619}
2620
2621#endif /* IEM_WITH_SETJMP */
2622
2623
2624/**
2625 * Fetches the next opcode word, returns automatically on failure.
2626 *
2627 * @param a_pu16 Where to return the opcode word.
2628 * @remark Implicitly references pVCpu.
2629 */
2630#ifndef IEM_WITH_SETJMP
2631# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2632 do \
2633 { \
2634 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2635 if (rcStrict2 != VINF_SUCCESS) \
2636 return rcStrict2; \
2637 } while (0)
2638#else
2639# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2640#endif
2641
2642#ifndef IEM_WITH_SETJMP
2643
2644/**
2645 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2646 *
2647 * @returns Strict VBox status code.
2648 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2649 * @param pu32 Where to return the opcode double word.
2650 */
2651DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2652{
2653 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2654 if (rcStrict == VINF_SUCCESS)
2655 {
2656 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2657 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2658 pVCpu->iem.s.offOpcode = offOpcode + 2;
2659 }
2660 else
2661 *pu32 = 0;
2662 return rcStrict;
2663}
2664
2665
2666/**
2667 * Fetches the next opcode word, zero extending it to a double word.
2668 *
2669 * @returns Strict VBox status code.
2670 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2671 * @param pu32 Where to return the opcode double word.
2672 */
2673DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2674{
2675 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2676 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2677 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2678
2679 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2680 pVCpu->iem.s.offOpcode = offOpcode + 2;
2681 return VINF_SUCCESS;
2682}
2683
2684#endif /* !IEM_WITH_SETJMP */
2685
2686
2687/**
2688 * Fetches the next opcode word and zero extends it to a double word, returns
2689 * automatically on failure.
2690 *
2691 * @param a_pu32 Where to return the opcode double word.
2692 * @remark Implicitly references pVCpu.
2693 */
2694#ifndef IEM_WITH_SETJMP
2695# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2696 do \
2697 { \
2698 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2699 if (rcStrict2 != VINF_SUCCESS) \
2700 return rcStrict2; \
2701 } while (0)
2702#else
2703# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2704#endif
2705
2706#ifndef IEM_WITH_SETJMP
2707
2708/**
2709 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2710 *
2711 * @returns Strict VBox status code.
2712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2713 * @param pu64 Where to return the opcode quad word.
2714 */
2715DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2716{
2717 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2718 if (rcStrict == VINF_SUCCESS)
2719 {
2720 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2721 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2722 pVCpu->iem.s.offOpcode = offOpcode + 2;
2723 }
2724 else
2725 *pu64 = 0;
2726 return rcStrict;
2727}
2728
2729
2730/**
2731 * Fetches the next opcode word, zero extending it to a quad word.
2732 *
2733 * @returns Strict VBox status code.
2734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2735 * @param pu64 Where to return the opcode quad word.
2736 */
2737DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2738{
2739 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2740 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2741 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2742
2743 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2744 pVCpu->iem.s.offOpcode = offOpcode + 2;
2745 return VINF_SUCCESS;
2746}
2747
2748#endif /* !IEM_WITH_SETJMP */
2749
2750/**
2751 * Fetches the next opcode word and zero extends it to a quad word, returns
2752 * automatically on failure.
2753 *
2754 * @param a_pu64 Where to return the opcode quad word.
2755 * @remark Implicitly references pVCpu.
2756 */
2757#ifndef IEM_WITH_SETJMP
2758# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2759 do \
2760 { \
2761 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2762 if (rcStrict2 != VINF_SUCCESS) \
2763 return rcStrict2; \
2764 } while (0)
2765#else
2766# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2767#endif
2768
2769
2770#ifndef IEM_WITH_SETJMP
2771/**
2772 * Fetches the next signed word from the opcode stream.
2773 *
2774 * @returns Strict VBox status code.
2775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2776 * @param pi16 Where to return the signed word.
2777 */
2778DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2779{
2780 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2781}
2782#endif /* !IEM_WITH_SETJMP */
2783
2784
2785/**
2786 * Fetches the next signed word from the opcode stream, returning automatically
2787 * on failure.
2788 *
2789 * @param a_pi16 Where to return the signed word.
2790 * @remark Implicitly references pVCpu.
2791 */
2792#ifndef IEM_WITH_SETJMP
2793# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2794 do \
2795 { \
2796 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2797 if (rcStrict2 != VINF_SUCCESS) \
2798 return rcStrict2; \
2799 } while (0)
2800#else
2801# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2802#endif
2803
2804#ifndef IEM_WITH_SETJMP
2805
2806/**
2807 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2808 *
2809 * @returns Strict VBox status code.
2810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2811 * @param pu32 Where to return the opcode dword.
2812 */
2813DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2814{
2815 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2816 if (rcStrict == VINF_SUCCESS)
2817 {
2818 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2819# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2820 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2821# else
2822 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2823 pVCpu->iem.s.abOpcode[offOpcode + 1],
2824 pVCpu->iem.s.abOpcode[offOpcode + 2],
2825 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2826# endif
2827 pVCpu->iem.s.offOpcode = offOpcode + 4;
2828 }
2829 else
2830 *pu32 = 0;
2831 return rcStrict;
2832}
2833
2834
2835/**
2836 * Fetches the next opcode dword.
2837 *
2838 * @returns Strict VBox status code.
2839 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2840 * @param pu32 Where to return the opcode double word.
2841 */
2842DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2843{
2844 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2845 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2846 {
2847 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2848# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2849 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2850# else
2851 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2852 pVCpu->iem.s.abOpcode[offOpcode + 1],
2853 pVCpu->iem.s.abOpcode[offOpcode + 2],
2854 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2855# endif
2856 return VINF_SUCCESS;
2857 }
2858 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2859}
2860
2861#else /* IEM_WITH_SETJMP */
2862
2863/**
2864 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2865 *
2866 * @returns The opcode dword.
2867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2868 */
2869DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2870{
2871# ifdef IEM_WITH_CODE_TLB
2872 uint32_t u32;
2873 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2874 return u32;
2875# else
2876 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2877 if (rcStrict == VINF_SUCCESS)
2878 {
2879 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2880 pVCpu->iem.s.offOpcode = offOpcode + 4;
2881# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2882 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2883# else
2884 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2885 pVCpu->iem.s.abOpcode[offOpcode + 1],
2886 pVCpu->iem.s.abOpcode[offOpcode + 2],
2887 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2888# endif
2889 }
2890 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2891# endif
2892}
2893
2894
2895/**
2896 * Fetches the next opcode dword, longjmp on error.
2897 *
2898 * @returns The opcode dword.
2899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2900 */
2901DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2902{
2903# ifdef IEM_WITH_CODE_TLB
2904 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2905 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2906 if (RT_LIKELY( pbBuf != NULL
2907 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2908 {
2909 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2910# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2911 return *(uint32_t const *)&pbBuf[offBuf];
2912# else
2913 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2914 pbBuf[offBuf + 1],
2915 pbBuf[offBuf + 2],
2916 pbBuf[offBuf + 3]);
2917# endif
2918 }
2919# else
2920 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2921 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2922 {
2923 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2924# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2925 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2926# else
2927 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2928 pVCpu->iem.s.abOpcode[offOpcode + 1],
2929 pVCpu->iem.s.abOpcode[offOpcode + 2],
2930 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2931# endif
2932 }
2933# endif
2934 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2935}
2936
2937#endif /* IEM_WITH_SETJMP */
2938
2939
2940/**
2941 * Fetches the next opcode dword, returns automatically on failure.
2942 *
2943 * @param a_pu32 Where to return the opcode dword.
2944 * @remark Implicitly references pVCpu.
2945 */
2946#ifndef IEM_WITH_SETJMP
2947# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2948 do \
2949 { \
2950 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2951 if (rcStrict2 != VINF_SUCCESS) \
2952 return rcStrict2; \
2953 } while (0)
2954#else
2955# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2956#endif
2957
2958#ifndef IEM_WITH_SETJMP
2959
2960/**
2961 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2962 *
2963 * @returns Strict VBox status code.
2964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2965 * @param pu64 Where to return the opcode dword, zero extended to a quad word.
2966 */
2967DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2968{
2969 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2970 if (rcStrict == VINF_SUCCESS)
2971 {
2972 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2973 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2974 pVCpu->iem.s.abOpcode[offOpcode + 1],
2975 pVCpu->iem.s.abOpcode[offOpcode + 2],
2976 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2977 pVCpu->iem.s.offOpcode = offOpcode + 4;
2978 }
2979 else
2980 *pu64 = 0;
2981 return rcStrict;
2982}
2983
2984
2985/**
2986 * Fetches the next opcode dword, zero extending it to a quad word.
2987 *
2988 * @returns Strict VBox status code.
2989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2990 * @param pu64 Where to return the opcode quad word.
2991 */
2992DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2993{
2994 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2995 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2996 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2997
2998 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2999 pVCpu->iem.s.abOpcode[offOpcode + 1],
3000 pVCpu->iem.s.abOpcode[offOpcode + 2],
3001 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3002 pVCpu->iem.s.offOpcode = offOpcode + 4;
3003 return VINF_SUCCESS;
3004}
3005
3006#endif /* !IEM_WITH_SETJMP */
3007
3008
3009/**
3010 * Fetches the next opcode dword and zero extends it to a quad word, returns
3011 * automatically on failure.
3012 *
3013 * @param a_pu64 Where to return the opcode quad word.
3014 * @remark Implicitly references pVCpu.
3015 */
3016#ifndef IEM_WITH_SETJMP
3017# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3018 do \
3019 { \
3020 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3021 if (rcStrict2 != VINF_SUCCESS) \
3022 return rcStrict2; \
3023 } while (0)
3024#else
3025# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3026#endif
3027
3028
3029#ifndef IEM_WITH_SETJMP
3030/**
3031 * Fetches the next signed double word from the opcode stream.
3032 *
3033 * @returns Strict VBox status code.
3034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3035 * @param pi32 Where to return the signed double word.
3036 */
3037DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3038{
3039 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3040}
3041#endif
3042
3043/**
3044 * Fetches the next signed double word from the opcode stream, returning
3045 * automatically on failure.
3046 *
3047 * @param a_pi32 Where to return the signed double word.
3048 * @remark Implicitly references pVCpu.
3049 */
3050#ifndef IEM_WITH_SETJMP
3051# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3052 do \
3053 { \
3054 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3055 if (rcStrict2 != VINF_SUCCESS) \
3056 return rcStrict2; \
3057 } while (0)
3058#else
3059# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3060#endif
3061
3062#ifndef IEM_WITH_SETJMP
3063
3064/**
3065 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3066 *
3067 * @returns Strict VBox status code.
3068 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3069 * @param pu64 Where to return the opcode qword.
3070 */
3071DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3072{
3073 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3074 if (rcStrict == VINF_SUCCESS)
3075 {
3076 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3077 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3078 pVCpu->iem.s.abOpcode[offOpcode + 1],
3079 pVCpu->iem.s.abOpcode[offOpcode + 2],
3080 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3081 pVCpu->iem.s.offOpcode = offOpcode + 4;
3082 }
3083 else
3084 *pu64 = 0;
3085 return rcStrict;
3086}
3087
3088
3089/**
3090 * Fetches the next opcode dword, sign extending it into a quad word.
3091 *
3092 * @returns Strict VBox status code.
3093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3094 * @param pu64 Where to return the opcode quad word.
3095 */
3096DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3097{
3098 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3099 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3100 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3101
3102 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3103 pVCpu->iem.s.abOpcode[offOpcode + 1],
3104 pVCpu->iem.s.abOpcode[offOpcode + 2],
3105 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3106 *pu64 = i32;
3107 pVCpu->iem.s.offOpcode = offOpcode + 4;
3108 return VINF_SUCCESS;
3109}
3110
3111#endif /* !IEM_WITH_SETJMP */
3112
3113
3114/**
3115 * Fetches the next opcode double word and sign extends it to a quad word,
3116 * returns automatically on failure.
3117 *
3118 * @param a_pu64 Where to return the opcode quad word.
3119 * @remark Implicitly references pVCpu.
3120 */
3121#ifndef IEM_WITH_SETJMP
3122# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3123 do \
3124 { \
3125 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3126 if (rcStrict2 != VINF_SUCCESS) \
3127 return rcStrict2; \
3128 } while (0)
3129#else
3130# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3131#endif
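
/**
 * @par Example (illustrative sketch, not part of the original source)
 * How an instruction handler might consume a 32-bit immediate that gets
 * sign-extended to 64 bits.  The handler name iemOpExample_MovRaxImm32Sx is
 * hypothetical and only serves to show the macro in context:
 * @code
 * IEM_STATIC VBOXSTRICTRC iemOpExample_MovRaxImm32Sx(PVMCPU pVCpu)
 * {
 *     uint64_t u64Imm;
 *     IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); // returns / longjmps on fetch failure
 *     // Opcode bytes 00 00 00 80 yield 0xffffffff80000000 here,
 *     // while 01 00 00 00 yields 0x0000000000000001.
 *     pVCpu->cpum.GstCtx.rax = u64Imm;
 *     return VINF_SUCCESS;
 * }
 * @endcode
 */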
3132
3133#ifndef IEM_WITH_SETJMP
3134
3135/**
3136 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3137 *
3138 * @returns Strict VBox status code.
3139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3140 * @param pu64 Where to return the opcode qword.
3141 */
3142DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3143{
3144 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3145 if (rcStrict == VINF_SUCCESS)
3146 {
3147 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3148# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3149 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3150# else
3151 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3152 pVCpu->iem.s.abOpcode[offOpcode + 1],
3153 pVCpu->iem.s.abOpcode[offOpcode + 2],
3154 pVCpu->iem.s.abOpcode[offOpcode + 3],
3155 pVCpu->iem.s.abOpcode[offOpcode + 4],
3156 pVCpu->iem.s.abOpcode[offOpcode + 5],
3157 pVCpu->iem.s.abOpcode[offOpcode + 6],
3158 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3159# endif
3160 pVCpu->iem.s.offOpcode = offOpcode + 8;
3161 }
3162 else
3163 *pu64 = 0;
3164 return rcStrict;
3165}
3166
3167
3168/**
3169 * Fetches the next opcode qword.
3170 *
3171 * @returns Strict VBox status code.
3172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3173 * @param pu64 Where to return the opcode qword.
3174 */
3175DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3176{
3177 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3178 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3179 {
3180# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3181 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3182# else
3183 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3184 pVCpu->iem.s.abOpcode[offOpcode + 1],
3185 pVCpu->iem.s.abOpcode[offOpcode + 2],
3186 pVCpu->iem.s.abOpcode[offOpcode + 3],
3187 pVCpu->iem.s.abOpcode[offOpcode + 4],
3188 pVCpu->iem.s.abOpcode[offOpcode + 5],
3189 pVCpu->iem.s.abOpcode[offOpcode + 6],
3190 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3191# endif
3192 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3193 return VINF_SUCCESS;
3194 }
3195 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3196}
3197
3198#else /* IEM_WITH_SETJMP */
3199
3200/**
3201 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3202 *
3203 * @returns The opcode qword.
3204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3205 */
3206DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3207{
3208# ifdef IEM_WITH_CODE_TLB
3209 uint64_t u64;
3210 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3211 return u64;
3212# else
3213 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3214 if (rcStrict == VINF_SUCCESS)
3215 {
3216 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3217 pVCpu->iem.s.offOpcode = offOpcode + 8;
3218# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3219 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3220# else
3221 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3222 pVCpu->iem.s.abOpcode[offOpcode + 1],
3223 pVCpu->iem.s.abOpcode[offOpcode + 2],
3224 pVCpu->iem.s.abOpcode[offOpcode + 3],
3225 pVCpu->iem.s.abOpcode[offOpcode + 4],
3226 pVCpu->iem.s.abOpcode[offOpcode + 5],
3227 pVCpu->iem.s.abOpcode[offOpcode + 6],
3228 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3229# endif
3230 }
3231 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3232# endif
3233}
3234
3235
3236/**
3237 * Fetches the next opcode qword, longjmp on error.
3238 *
3239 * @returns The opcode qword.
3240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3241 */
3242DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3243{
3244# ifdef IEM_WITH_CODE_TLB
3245 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3246 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3247 if (RT_LIKELY( pbBuf != NULL
3248 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3249 {
3250 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3251# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3252 return *(uint64_t const *)&pbBuf[offBuf];
3253# else
3254 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3255 pbBuf[offBuf + 1],
3256 pbBuf[offBuf + 2],
3257 pbBuf[offBuf + 3],
3258 pbBuf[offBuf + 4],
3259 pbBuf[offBuf + 5],
3260 pbBuf[offBuf + 6],
3261 pbBuf[offBuf + 7]);
3262# endif
3263 }
3264# else
3265 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3266 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3267 {
3268 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3269# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3270 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3271# else
3272 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3273 pVCpu->iem.s.abOpcode[offOpcode + 1],
3274 pVCpu->iem.s.abOpcode[offOpcode + 2],
3275 pVCpu->iem.s.abOpcode[offOpcode + 3],
3276 pVCpu->iem.s.abOpcode[offOpcode + 4],
3277 pVCpu->iem.s.abOpcode[offOpcode + 5],
3278 pVCpu->iem.s.abOpcode[offOpcode + 6],
3279 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3280# endif
3281 }
3282# endif
3283 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3284}
3285
3286#endif /* IEM_WITH_SETJMP */
3287
3288/**
3289 * Fetches the next opcode quad word, returns automatically on failure.
3290 *
3291 * @param a_pu64 Where to return the opcode quad word.
3292 * @remark Implicitly references pVCpu.
3293 */
3294#ifndef IEM_WITH_SETJMP
3295# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3296 do \
3297 { \
3298 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3299 if (rcStrict2 != VINF_SUCCESS) \
3300 return rcStrict2; \
3301 } while (0)
3302#else
3303# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3304#endif
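
/**
 * @par Example (illustrative sketch, not part of the original source)
 * Fetching a full 8-byte immediate, e.g. for a MOV r64, imm64 style encoding.
 * In the non-setjmp build the macro returns the strict status code from the
 * calling function on failure; in the setjmp build it longjmps instead, so no
 * explicit error path is needed.  The handler name is hypothetical:
 * @code
 * IEM_STATIC VBOXSTRICTRC iemOpExample_MovRcxImm64(PVMCPU pVCpu)
 * {
 *     uint64_t u64Imm;
 *     IEM_OPCODE_GET_NEXT_U64(&u64Imm);
 *     pVCpu->cpum.GstCtx.rcx = u64Imm;
 *     return VINF_SUCCESS;
 * }
 * @endcode
 */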
3305
3306
3307/** @name Misc Worker Functions.
3308 * @{
3309 */
3310
3311/**
3312 * Gets the exception class for the specified exception vector.
3313 *
3314 * @returns The class of the specified exception.
3315 * @param uVector The exception vector.
3316 */
3317IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3318{
3319 Assert(uVector <= X86_XCPT_LAST);
3320 switch (uVector)
3321 {
3322 case X86_XCPT_DE:
3323 case X86_XCPT_TS:
3324 case X86_XCPT_NP:
3325 case X86_XCPT_SS:
3326 case X86_XCPT_GP:
3327 case X86_XCPT_SX: /* AMD only */
3328 return IEMXCPTCLASS_CONTRIBUTORY;
3329
3330 case X86_XCPT_PF:
3331 case X86_XCPT_VE: /* Intel only */
3332 return IEMXCPTCLASS_PAGE_FAULT;
3333
3334 case X86_XCPT_DF:
3335 return IEMXCPTCLASS_DOUBLE_FAULT;
3336 }
3337 return IEMXCPTCLASS_BENIGN;
3338}
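
/**
 * @par Example (illustrative sketch, not part of the original source)
 * The classification feeds the double/triple fault rules used below, e.g.:
 * @code
 * Assert(iemGetXcptClass(X86_XCPT_GP) == IEMXCPTCLASS_CONTRIBUTORY);
 * Assert(iemGetXcptClass(X86_XCPT_PF) == IEMXCPTCLASS_PAGE_FAULT);
 * Assert(iemGetXcptClass(X86_XCPT_UD) == IEMXCPTCLASS_BENIGN);
 * @endcode
 */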
3339
3340
3341/**
3342 * Evaluates how to handle an exception caused during delivery of another event
3343 * (exception / interrupt).
3344 *
3345 * @returns How to handle the recursive exception.
3346 * @param pVCpu The cross context virtual CPU structure of the
3347 * calling thread.
3348 * @param fPrevFlags The flags of the previous event.
3349 * @param uPrevVector The vector of the previous event.
3350 * @param fCurFlags The flags of the current exception.
3351 * @param uCurVector The vector of the current exception.
3352 * @param pfXcptRaiseInfo Where to store additional information about the
3353 * exception condition. Optional.
3354 */
3355VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3356 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3357{
3358 /*
3359     * Only CPU exceptions can be raised while delivering other events; exceptions generated by
3360     * software interrupts (INTn/INT3/INTO/ICEBP) cannot occur as the current (second) exception.
3361 */
3362 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3363 Assert(pVCpu); RT_NOREF(pVCpu);
3364 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3365
3366 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3367 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3368 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3369 {
3370 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3371 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3372 {
3373 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3374 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3375 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3376 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3377 {
3378 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3379 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3380 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3381 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3382 uCurVector, pVCpu->cpum.GstCtx.cr2));
3383 }
3384 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3385 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3386 {
3387 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3388 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3389 }
3390 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3391 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3392 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3393 {
3394 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3395 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3396 }
3397 }
3398 else
3399 {
3400 if (uPrevVector == X86_XCPT_NMI)
3401 {
3402 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3403 if (uCurVector == X86_XCPT_PF)
3404 {
3405 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3406 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3407 }
3408 }
3409 else if ( uPrevVector == X86_XCPT_AC
3410 && uCurVector == X86_XCPT_AC)
3411 {
3412 enmRaise = IEMXCPTRAISE_CPU_HANG;
3413 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3414 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3415 }
3416 }
3417 }
3418 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3419 {
3420 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3421 if (uCurVector == X86_XCPT_PF)
3422 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3423 }
3424 else
3425 {
3426 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3427 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3428 }
3429
3430 if (pfXcptRaiseInfo)
3431 *pfXcptRaiseInfo = fRaiseInfo;
3432 return enmRaise;
3433}
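
/**
 * @par Example (illustrative sketch, not part of the original source)
 * Raising a \#GP while delivering a \#PF (page-fault class followed by a
 * contributory exception) resolves to a double fault:
 * @code
 * IEMXCPTRAISEINFO fRaiseInfo;
 * IEMXCPTRAISE     enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                      &fRaiseInfo);
 * Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
 * Assert(fRaiseInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
 * @endcode
 */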
3434
3435
3436/**
3437 * Enters the CPU shutdown state initiated by a triple fault or other
3438 * unrecoverable conditions.
3439 *
3440 * @returns Strict VBox status code.
3441 * @param pVCpu The cross context virtual CPU structure of the
3442 * calling thread.
3443 */
3444IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3445{
3446 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3447 {
3448 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3449 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3450 }
3451
3452 RT_NOREF(pVCpu);
3453 return VINF_EM_TRIPLE_FAULT;
3454}
3455
3456
3457/**
3458 * Validates a new SS segment.
3459 *
3460 * @returns VBox strict status code.
3461 * @param pVCpu The cross context virtual CPU structure of the
3462 * calling thread.
3463 * @param   NewSS           The new SS selector.
3464 * @param uCpl The CPL to load the stack for.
3465 * @param pDesc Where to return the descriptor.
3466 */
3467IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3468{
3469 /* Null selectors are not allowed (we're not called for dispatching
3470 interrupts with SS=0 in long mode). */
3471 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3472 {
3473 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3474 return iemRaiseTaskSwitchFault0(pVCpu);
3475 }
3476
3477 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3478 if ((NewSS & X86_SEL_RPL) != uCpl)
3479 {
3480        Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
3481 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3482 }
3483
3484 /*
3485 * Read the descriptor.
3486 */
3487 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3488 if (rcStrict != VINF_SUCCESS)
3489 return rcStrict;
3490
3491 /*
3492 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3493 */
3494 if (!pDesc->Legacy.Gen.u1DescType)
3495 {
3496 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3497 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3498 }
3499
3500 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3501 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3502 {
3503 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3504 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3505 }
3506 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3507 {
3508        Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3509 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3510 }
3511
3512 /* Is it there? */
3513 /** @todo testcase: Is this checked before the canonical / limit check below? */
3514 if (!pDesc->Legacy.Gen.u1Present)
3515 {
3516 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3517 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3518 }
3519
3520 return VINF_SUCCESS;
3521}
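
/**
 * @par Example (illustrative sketch, not part of the original source)
 * A typical caller validates the selector first and only commits SS from the
 * returned descriptor afterwards; uNewSS and uNewCpl are hypothetical locals:
 * @code
 * IEMSELDESC   DescSS;
 * VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, uNewSS, uNewCpl, &DescSS);
 * if (rcStrict != VINF_SUCCESS)
 *     return rcStrict;  // #TS / #NP was already raised as appropriate
 * // ... commit SS using DescSS ...
 * @endcode
 */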
3522
3523
3524/**
3525 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3526 * not.
3527 *
3528 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3529 */
3530#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3531# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3532#else
3533# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3534#endif
3535
3536/**
3537 * Updates the EFLAGS in the correct manner wrt. PATM.
3538 *
3539 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3540 * @param a_fEfl The new EFLAGS.
3541 */
3542#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3543# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3544#else
3545# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3546#endif
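
/**
 * @par Example (illustrative sketch, not part of the original source)
 * A typical read-modify-write of the guest EFLAGS through these macros,
 * mirroring what the real-mode exception code further down does:
 * @code
 * uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
 * fEfl &= ~X86_EFL_IF;
 * IEMMISC_SET_EFL(pVCpu, fEfl);
 * @endcode
 */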
3547
3548
3549/** @} */
3550
3551/** @name Raising Exceptions.
3552 *
3553 * @{
3554 */
3555
3556
3557/**
3558 * Loads the specified stack far pointer from the TSS.
3559 *
3560 * @returns VBox strict status code.
3561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3562 * @param uCpl The CPL to load the stack for.
3563 * @param pSelSS Where to return the new stack segment.
3564 * @param puEsp Where to return the new stack pointer.
3565 */
3566IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3567{
3568 VBOXSTRICTRC rcStrict;
3569 Assert(uCpl < 4);
3570
3571 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3572 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3573 {
3574 /*
3575 * 16-bit TSS (X86TSS16).
3576 */
3577 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3578 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3579 {
3580 uint32_t off = uCpl * 4 + 2;
3581 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3582 {
3583 /** @todo check actual access pattern here. */
3584 uint32_t u32Tmp = 0; /* gcc maybe... */
3585 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3586 if (rcStrict == VINF_SUCCESS)
3587 {
3588 *puEsp = RT_LOWORD(u32Tmp);
3589 *pSelSS = RT_HIWORD(u32Tmp);
3590 return VINF_SUCCESS;
3591 }
3592 }
3593 else
3594 {
3595 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3596 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3597 }
3598 break;
3599 }
3600
3601 /*
3602 * 32-bit TSS (X86TSS32).
3603 */
3604 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3605 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3606 {
3607 uint32_t off = uCpl * 8 + 4;
3608 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3609 {
3610/** @todo check actual access pattern here. */
3611 uint64_t u64Tmp;
3612 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3613 if (rcStrict == VINF_SUCCESS)
3614 {
3615 *puEsp = u64Tmp & UINT32_MAX;
3616 *pSelSS = (RTSEL)(u64Tmp >> 32);
3617 return VINF_SUCCESS;
3618 }
3619 }
3620 else
3621 {
3622                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3623 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3624 }
3625 break;
3626 }
3627
3628 default:
3629 AssertFailed();
3630 rcStrict = VERR_IEM_IPE_4;
3631 break;
3632 }
3633
3634 *puEsp = 0; /* make gcc happy */
3635 *pSelSS = 0; /* make gcc happy */
3636 return rcStrict;
3637}
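
/**
 * @par Example (illustrative sketch, not part of the original source)
 * Offset arithmetic for the 32-bit TSS case above: the {espN, ssN} pairs start
 * at offset 4 and are 8 bytes apart, so for uCpl=1 the qword read covers the
 * esp1/ss1 pair:
 * @code
 * uint32_t off = 1 * 8 + 4;            // == 0x0C, where esp1 lives
 * // *puEsp  = u64Tmp & UINT32_MAX;    // esp1 (low dword)
 * // *pSelSS = (RTSEL)(u64Tmp >> 32);  // ss1  (word in the high dword)
 * @endcode
 */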
3638
3639
3640/**
3641 * Loads the specified stack pointer from the 64-bit TSS.
3642 *
3643 * @returns VBox strict status code.
3644 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3645 * @param uCpl The CPL to load the stack for.
3646 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3647 * @param puRsp Where to return the new stack pointer.
3648 */
3649IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3650{
3651 Assert(uCpl < 4);
3652 Assert(uIst < 8);
3653 *puRsp = 0; /* make gcc happy */
3654
3655 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3656 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3657
3658 uint32_t off;
3659 if (uIst)
3660 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3661 else
3662 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3663 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3664 {
3665 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3666 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3667 }
3668
3669 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3670}
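
/**
 * @par Example (illustrative sketch, not part of the original source)
 * The 64-bit TSS stores rsp0..rsp2 and ist1..ist7 as runs of consecutive
 * 8-byte fields, which is what the offset calculation above relies on:
 * @code
 * uint32_t const offRsp2 = 2 * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);       // uCpl=2, uIst=0
 * uint32_t const offIst3 = (3 - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1); // uIst=3
 * @endcode
 */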
3671
3672
3673/**
3674 * Adjusts the CPU state according to the exception being raised.
3675 *
3676 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3677 * @param u8Vector The exception that has been raised.
3678 */
3679DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3680{
3681 switch (u8Vector)
3682 {
3683 case X86_XCPT_DB:
3684 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3685 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3686 break;
3687 /** @todo Read the AMD and Intel exception reference... */
3688 }
3689}
3690
3691
3692/**
3693 * Implements exceptions and interrupts for real mode.
3694 *
3695 * @returns VBox strict status code.
3696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3697 * @param cbInstr The number of bytes to offset rIP by in the return
3698 * address.
3699 * @param u8Vector The interrupt / exception vector number.
3700 * @param fFlags The flags.
3701 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3702 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3703 */
3704IEM_STATIC VBOXSTRICTRC
3705iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3706 uint8_t cbInstr,
3707 uint8_t u8Vector,
3708 uint32_t fFlags,
3709 uint16_t uErr,
3710 uint64_t uCr2)
3711{
3712 NOREF(uErr); NOREF(uCr2);
3713 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3714
3715 /*
3716 * Read the IDT entry.
3717 */
3718 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3719 {
3720 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3721 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3722 }
3723 RTFAR16 Idte;
3724 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3725 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3726 {
3727 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3728 return rcStrict;
3729 }
3730
3731 /*
3732 * Push the stack frame.
3733 */
3734 uint16_t *pu16Frame;
3735 uint64_t uNewRsp;
3736 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3737 if (rcStrict != VINF_SUCCESS)
3738 return rcStrict;
3739
3740 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3741#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3742 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3743 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3744 fEfl |= UINT16_C(0xf000);
3745#endif
3746 pu16Frame[2] = (uint16_t)fEfl;
3747 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3748 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3749 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3750 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3751 return rcStrict;
3752
3753 /*
3754 * Load the vector address into cs:ip and make exception specific state
3755 * adjustments.
3756 */
3757 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3758 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3759 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3760 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3761 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3762 pVCpu->cpum.GstCtx.rip = Idte.off;
3763 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3764 IEMMISC_SET_EFL(pVCpu, fEfl);
3765
3766 /** @todo do we actually do this in real mode? */
3767 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3768 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3769
3770 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3771}
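
/**
 * @par Example (illustrative sketch, not part of the original source)
 * For a software INT 21h with the IDTR at its real-mode reset value (base 0,
 * limit 0x3ff), the code above reads the 4-byte vector entry and builds a
 * 6-byte stack frame (sp = SP before the push):
 * @code
 * // IVT entry:   Idte.off = word at 0x21 * 4,  Idte.sel = word at 0x21 * 4 + 2
 * // Stack frame: [sp-2] = FLAGS, [sp-4] = CS, [sp-6] = IP of the next instruction
 * // New CS:IP:   cs.u64Base = (uint32_t)Idte.sel << 4;  rip = Idte.off
 * @endcode
 */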
3772
3773
3774/**
3775 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3776 *
3777 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3778 * @param pSReg Pointer to the segment register.
3779 */
3780IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3781{
3782 pSReg->Sel = 0;
3783 pSReg->ValidSel = 0;
3784 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3785 {
3786        /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3787 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3788 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3789 }
3790 else
3791 {
3792 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3793 /** @todo check this on AMD-V */
3794 pSReg->u64Base = 0;
3795 pSReg->u32Limit = 0;
3796 }
3797}
3798
3799
3800/**
3801 * Loads a segment selector during a task switch in V8086 mode.
3802 *
3803 * @param pSReg Pointer to the segment register.
3804 * @param uSel The selector value to load.
3805 */
3806IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3807{
3808 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3809 pSReg->Sel = uSel;
3810 pSReg->ValidSel = uSel;
3811 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3812 pSReg->u64Base = uSel << 4;
3813 pSReg->u32Limit = 0xffff;
3814 pSReg->Attr.u = 0xf3;
3815}
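
/**
 * @par Example (illustrative sketch, not part of the original source)
 * V8086 segmentation is plain real-mode style shifting:
 * @code
 * iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, 0x1234);
 * // ds.u64Base == 0x12340, ds.u32Limit == 0xffff,
 * // ds.Attr.u == 0xf3 (present, DPL=3, read/write data, accessed)
 * @endcode
 */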
3816
3817
3818/**
3819 * Loads a NULL data selector into a selector register, both the hidden and
3820 * visible parts, in protected mode.
3821 *
3822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3823 * @param pSReg Pointer to the segment register.
3824 * @param uRpl The RPL.
3825 */
3826IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3827{
3828    /** @todo Testcase: write a testcase checking what happens when loading a NULL
3829 * data selector in protected mode. */
3830 pSReg->Sel = uRpl;
3831 pSReg->ValidSel = uRpl;
3832 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3833 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3834 {
3835 /* VT-x (Intel 3960x) observed doing something like this. */
3836 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3837 pSReg->u32Limit = UINT32_MAX;
3838 pSReg->u64Base = 0;
3839 }
3840 else
3841 {
3842 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3843 pSReg->u32Limit = 0;
3844 pSReg->u64Base = 0;
3845 }
3846}
3847
3848
3849/**
3850 * Loads a segment selector during a task switch in protected mode.
3851 *
3852 * In this task switch scenario, we would throw \#TS exceptions rather than
3853 * \#GPs.
3854 *
3855 * @returns VBox strict status code.
3856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3857 * @param pSReg Pointer to the segment register.
3858 * @param uSel The new selector value.
3859 *
3860 * @remarks This does _not_ handle CS or SS.
3861 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3862 */
3863IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3864{
3865 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3866
3867 /* Null data selector. */
3868 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3869 {
3870 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3871 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3872 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3873 return VINF_SUCCESS;
3874 }
3875
3876 /* Fetch the descriptor. */
3877 IEMSELDESC Desc;
3878 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3879 if (rcStrict != VINF_SUCCESS)
3880 {
3881 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3882 VBOXSTRICTRC_VAL(rcStrict)));
3883 return rcStrict;
3884 }
3885
3886 /* Must be a data segment or readable code segment. */
3887 if ( !Desc.Legacy.Gen.u1DescType
3888 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3889 {
3890 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3891 Desc.Legacy.Gen.u4Type));
3892 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3893 }
3894
3895 /* Check privileges for data segments and non-conforming code segments. */
3896 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3897 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3898 {
3899 /* The RPL and the new CPL must be less than or equal to the DPL. */
3900 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3901 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3902 {
3903 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3904 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3905 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3906 }
3907 }
3908
3909 /* Is it there? */
3910 if (!Desc.Legacy.Gen.u1Present)
3911 {
3912 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3913 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3914 }
3915
3916 /* The base and limit. */
3917 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3918 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3919
3920 /*
3921 * Ok, everything checked out fine. Now set the accessed bit before
3922 * committing the result into the registers.
3923 */
3924 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3925 {
3926 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3927 if (rcStrict != VINF_SUCCESS)
3928 return rcStrict;
3929 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3930 }
3931
3932 /* Commit */
3933 pSReg->Sel = uSel;
3934 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3935 pSReg->u32Limit = cbLimit;
3936 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3937 pSReg->ValidSel = uSel;
3938 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3939 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3940 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3941
3942 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3943 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3944 return VINF_SUCCESS;
3945}
3946
3947
3948/**
3949 * Performs a task switch.
3950 *
3951 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3952 * caller is responsible for performing the necessary checks (like DPL, TSS
3953 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3954 * reference for JMP, CALL, IRET.
3955 *
3956 * If the task switch is due to a software interrupt or hardware exception,
3957 * the caller is responsible for validating the TSS selector and descriptor. See
3958 * Intel Instruction reference for INT n.
3959 *
3960 * @returns VBox strict status code.
3961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3962 * @param enmTaskSwitch What caused this task switch.
3963 * @param uNextEip The EIP effective after the task switch.
3964 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3965 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3966 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3967 * @param SelTSS The TSS selector of the new task.
3968 * @param pNewDescTSS Pointer to the new TSS descriptor.
3969 */
3970IEM_STATIC VBOXSTRICTRC
3971iemTaskSwitch(PVMCPU pVCpu,
3972 IEMTASKSWITCH enmTaskSwitch,
3973 uint32_t uNextEip,
3974 uint32_t fFlags,
3975 uint16_t uErr,
3976 uint64_t uCr2,
3977 RTSEL SelTSS,
3978 PIEMSELDESC pNewDescTSS)
3979{
3980 Assert(!IEM_IS_REAL_MODE(pVCpu));
3981 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3982 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3983
3984 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3985 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3986 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3987 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3988 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3989
3990 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3991 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3992
3993 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3994 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
3995
3996 /* Update CR2 in case it's a page-fault. */
3997 /** @todo This should probably be done much earlier in IEM/PGM. See
3998 * @bugref{5653#c49}. */
3999 if (fFlags & IEM_XCPT_FLAGS_CR2)
4000 pVCpu->cpum.GstCtx.cr2 = uCr2;
4001
4002 /*
4003 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4004 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4005 */
4006 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4007 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4008 if (uNewTSSLimit < uNewTSSLimitMin)
4009 {
4010 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4011 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4012 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4013 }
4014
4015 /*
4016 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4017 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4018 */
4019 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4020 {
4021 uint32_t const uExitInfo1 = SelTSS;
4022 uint32_t uExitInfo2 = uErr;
4023 switch (enmTaskSwitch)
4024 {
4025 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4026 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4027 default: break;
4028 }
4029 if (fFlags & IEM_XCPT_FLAGS_ERR)
4030 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4031 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4032 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4033
4034 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4035 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4036 RT_NOREF2(uExitInfo1, uExitInfo2);
4037 }
4038 /** @todo Nested-VMX task-switch intercept. */
4039
4040 /*
4041     * Check the current TSS limit. The last write to the current TSS during the
4042 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4043 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4044 *
4045     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4046 * end up with smaller than "legal" TSS limits.
4047 */
4048 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4049 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4050 if (uCurTSSLimit < uCurTSSLimitMin)
4051 {
4052 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4053 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4054 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4055 }
4056
4057 /*
4058 * Verify that the new TSS can be accessed and map it. Map only the required contents
4059 * and not the entire TSS.
4060 */
4061 void *pvNewTSS;
4062 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4063 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4064 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4065 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4066 * not perform correct translation if this happens. See Intel spec. 7.2.1
4067 * "Task-State Segment" */
4068 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4069 if (rcStrict != VINF_SUCCESS)
4070 {
4071 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4072 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4073 return rcStrict;
4074 }
4075
4076 /*
4077 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4078 */
4079 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4080 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4081 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4082 {
4083 PX86DESC pDescCurTSS;
4084 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4085 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4086 if (rcStrict != VINF_SUCCESS)
4087 {
4088 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4089 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4090 return rcStrict;
4091 }
4092
4093 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4094 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4095 if (rcStrict != VINF_SUCCESS)
4096 {
4097 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4098 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4099 return rcStrict;
4100 }
4101
4102 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4103 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4104 {
4105 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4106 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4107 u32EFlags &= ~X86_EFL_NT;
4108 }
4109 }
4110
4111 /*
4112 * Save the CPU state into the current TSS.
4113 */
4114 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4115 if (GCPtrNewTSS == GCPtrCurTSS)
4116 {
4117 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4118 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4119 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ldtr.Sel));
4120 }
4121 if (fIsNewTSS386)
4122 {
4123 /*
4124 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4125 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4126 */
4127 void *pvCurTSS32;
4128 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4129 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4130 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4131 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4132 if (rcStrict != VINF_SUCCESS)
4133 {
4134 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4135 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4136 return rcStrict;
4137 }
4138
4139        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..cbCurTSS). */
4140 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4141 pCurTSS32->eip = uNextEip;
4142 pCurTSS32->eflags = u32EFlags;
4143 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4144 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4145 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4146 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4147 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4148 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4149 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4150 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4151 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4152 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4153 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4154 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4155 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4156 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4157
4158 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4159 if (rcStrict != VINF_SUCCESS)
4160 {
4161 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4162 VBOXSTRICTRC_VAL(rcStrict)));
4163 return rcStrict;
4164 }
4165 }
4166 else
4167 {
4168 /*
4169 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4170 */
4171 void *pvCurTSS16;
4172 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4173 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4174 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4175 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4176 if (rcStrict != VINF_SUCCESS)
4177 {
4178 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4179 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4180 return rcStrict;
4181 }
4182
4183        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..cbCurTSS). */
4184 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4185 pCurTSS16->ip = uNextEip;
4186 pCurTSS16->flags = u32EFlags;
4187 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4188 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4189 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4190 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4191 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4192 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4193 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4194 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4195 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4196 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4197 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4198 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4199
4200 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4201 if (rcStrict != VINF_SUCCESS)
4202 {
4203 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4204 VBOXSTRICTRC_VAL(rcStrict)));
4205 return rcStrict;
4206 }
4207 }
4208
4209 /*
4210 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4211 */
4212 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4213 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4214 {
4215        /* 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
4216 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4217 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4218 }
4219
4220 /*
4221 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4222     * so it's done further below with error handling (e.g. CR3 changes will go through PGM).
4223 */
4224 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4225 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4226 bool fNewDebugTrap;
4227 if (fIsNewTSS386)
4228 {
4229 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4230 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4231 uNewEip = pNewTSS32->eip;
4232 uNewEflags = pNewTSS32->eflags;
4233 uNewEax = pNewTSS32->eax;
4234 uNewEcx = pNewTSS32->ecx;
4235 uNewEdx = pNewTSS32->edx;
4236 uNewEbx = pNewTSS32->ebx;
4237 uNewEsp = pNewTSS32->esp;
4238 uNewEbp = pNewTSS32->ebp;
4239 uNewEsi = pNewTSS32->esi;
4240 uNewEdi = pNewTSS32->edi;
4241 uNewES = pNewTSS32->es;
4242 uNewCS = pNewTSS32->cs;
4243 uNewSS = pNewTSS32->ss;
4244 uNewDS = pNewTSS32->ds;
4245 uNewFS = pNewTSS32->fs;
4246 uNewGS = pNewTSS32->gs;
4247 uNewLdt = pNewTSS32->selLdt;
4248 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4249 }
4250 else
4251 {
4252 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4253 uNewCr3 = 0;
4254 uNewEip = pNewTSS16->ip;
4255 uNewEflags = pNewTSS16->flags;
4256 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4257 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4258 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4259 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4260 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4261 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4262 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4263 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4264 uNewES = pNewTSS16->es;
4265 uNewCS = pNewTSS16->cs;
4266 uNewSS = pNewTSS16->ss;
4267 uNewDS = pNewTSS16->ds;
4268 uNewFS = 0;
4269 uNewGS = 0;
4270 uNewLdt = pNewTSS16->selLdt;
4271 fNewDebugTrap = false;
4272 }
4273
4274 if (GCPtrNewTSS == GCPtrCurTSS)
4275 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4276 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4277
4278 /*
4279 * We're done accessing the new TSS.
4280 */
4281 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4282 if (rcStrict != VINF_SUCCESS)
4283 {
4284 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4285 return rcStrict;
4286 }
4287
4288 /*
4289 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4290 */
4291 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4292 {
4293 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4294 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4295 if (rcStrict != VINF_SUCCESS)
4296 {
4297 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4298 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4299 return rcStrict;
4300 }
4301
4302 /* Check that the descriptor indicates the new TSS is available (not busy). */
4303 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4304 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4305 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4306
4307 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4308 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4309 if (rcStrict != VINF_SUCCESS)
4310 {
4311 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4312 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4313 return rcStrict;
4314 }
4315 }
4316
4317 /*
4318     * From this point on, we're technically in the new task. Exceptions are deferred until the
4319     * task switch completes, but are raised before any instruction in the new task executes.
4320 */
4321 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4322 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4323 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4324 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4325 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4326 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4327 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4328
4329 /* Set the busy bit in TR. */
4330 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4331 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4332 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4333 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4334 {
4335 uNewEflags |= X86_EFL_NT;
4336 }
4337
4338 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4339 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4340 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4341
4342 pVCpu->cpum.GstCtx.eip = uNewEip;
4343 pVCpu->cpum.GstCtx.eax = uNewEax;
4344 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4345 pVCpu->cpum.GstCtx.edx = uNewEdx;
4346 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4347 pVCpu->cpum.GstCtx.esp = uNewEsp;
4348 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4349 pVCpu->cpum.GstCtx.esi = uNewEsi;
4350 pVCpu->cpum.GstCtx.edi = uNewEdi;
4351
4352 uNewEflags &= X86_EFL_LIVE_MASK;
4353 uNewEflags |= X86_EFL_RA1_MASK;
4354 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4355
4356 /*
4357 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4358 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4359     * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
4360 */
4361 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4362 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4363
4364 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4365 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4366
4367 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4368 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4369
4370 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4371 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4372
4373 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4374 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4375
4376 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4377 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4378 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4379
4380 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4381 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4382 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4383 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4384
4385 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4386 {
4387 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4388 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4389 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4390 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4391 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4392 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4393 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4394 }
4395
4396 /*
4397 * Switch CR3 for the new task.
4398 */
4399 if ( fIsNewTSS386
4400 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4401 {
4402 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4403 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4404 AssertRCSuccessReturn(rc, rc);
4405
4406 /* Inform PGM. */
4407 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4408 AssertRCReturn(rc, rc);
4409 /* ignore informational status codes */
4410
4411 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4412 }
4413
4414 /*
4415 * Switch LDTR for the new task.
4416 */
4417 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4418 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4419 else
4420 {
4421 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4422
4423 IEMSELDESC DescNewLdt;
4424 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4425 if (rcStrict != VINF_SUCCESS)
4426 {
4427 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4428 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4429 return rcStrict;
4430 }
4431 if ( !DescNewLdt.Legacy.Gen.u1Present
4432 || DescNewLdt.Legacy.Gen.u1DescType
4433 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4434 {
4435 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4436 uNewLdt, DescNewLdt.Legacy.u));
4437 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4438 }
4439
4440 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4441 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4442 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4443 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4444 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4445 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4446 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4447 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4448 }
4449
4450 IEMSELDESC DescSS;
4451 if (IEM_IS_V86_MODE(pVCpu))
4452 {
4453 pVCpu->iem.s.uCpl = 3;
4454 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4455 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4456 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4457 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4458 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4459 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4460
4461 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4462 DescSS.Legacy.u = 0;
4463 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4464 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4465 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4466 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4467 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4468 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4469 DescSS.Legacy.Gen.u2Dpl = 3;
4470 }
4471 else
4472 {
4473 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4474
4475 /*
4476 * Load the stack segment for the new task.
4477 */
4478 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4479 {
4480 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4481 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4482 }
4483
4484 /* Fetch the descriptor. */
4485 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4486 if (rcStrict != VINF_SUCCESS)
4487 {
4488 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4489 VBOXSTRICTRC_VAL(rcStrict)));
4490 return rcStrict;
4491 }
4492
4493 /* SS must be a data segment and writable. */
4494 if ( !DescSS.Legacy.Gen.u1DescType
4495 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4496 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4497 {
4498 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4499 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4500 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4501 }
4502
4503 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4504 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4505 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4506 {
4507 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4508 uNewCpl));
4509 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4510 }
4511
4512 /* Is it there? */
4513 if (!DescSS.Legacy.Gen.u1Present)
4514 {
4515 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4516 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4517 }
4518
4519 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4520 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4521
4522 /* Set the accessed bit before committing the result into SS. */
4523 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4524 {
4525 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4526 if (rcStrict != VINF_SUCCESS)
4527 return rcStrict;
4528 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4529 }
4530
4531 /* Commit SS. */
4532 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4533 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4534 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4535 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4536 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4537 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4538 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4539
4540 /* CPL has changed, update IEM before loading rest of segments. */
4541 pVCpu->iem.s.uCpl = uNewCpl;
4542
4543 /*
4544 * Load the data segments for the new task.
4545 */
4546 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4547 if (rcStrict != VINF_SUCCESS)
4548 return rcStrict;
4549 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4550 if (rcStrict != VINF_SUCCESS)
4551 return rcStrict;
4552 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4553 if (rcStrict != VINF_SUCCESS)
4554 return rcStrict;
4555 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4556 if (rcStrict != VINF_SUCCESS)
4557 return rcStrict;
4558
4559 /*
4560 * Load the code segment for the new task.
4561 */
4562 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4563 {
4564 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4565 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4566 }
4567
4568 /* Fetch the descriptor. */
4569 IEMSELDESC DescCS;
4570 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4571 if (rcStrict != VINF_SUCCESS)
4572 {
4573 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4574 return rcStrict;
4575 }
4576
4577 /* CS must be a code segment. */
4578 if ( !DescCS.Legacy.Gen.u1DescType
4579 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4580 {
4581 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4582 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4583 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4584 }
4585
4586 /* For conforming CS, DPL must be less than or equal to the RPL. */
4587 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4588 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4589 {
4590            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4591 DescCS.Legacy.Gen.u2Dpl));
4592 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4593 }
4594
4595 /* For non-conforming CS, DPL must match RPL. */
4596 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4597 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4598 {
4599        Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4600 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4601 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4602 }
4603
4604 /* Is it there? */
4605 if (!DescCS.Legacy.Gen.u1Present)
4606 {
4607 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4608 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4609 }
4610
4611 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4612 u64Base = X86DESC_BASE(&DescCS.Legacy);
4613
4614 /* Set the accessed bit before committing the result into CS. */
4615 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4616 {
4617 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4618 if (rcStrict != VINF_SUCCESS)
4619 return rcStrict;
4620 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4621 }
4622
4623 /* Commit CS. */
4624 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4625 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4626 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4627 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4628 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4629 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4630 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4631 }
4632
4633 /** @todo Debug trap. */
4634 if (fIsNewTSS386 && fNewDebugTrap)
4635 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4636
4637 /*
4638 * Construct the error code masks based on what caused this task switch.
4639 * See Intel Instruction reference for INT.
4640 */
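    /* For reference (illustrative only, standard x86 error-code format the uExt bit
       below feeds into):
         bit 0       EXT - the fault was caused by an external event (hardware
                           interrupt or an earlier exception), not a software INT n.
         bit 1       IDT - the selector index refers to an IDT entry.
         bit 2       TI  - the selector index refers to the LDT (only when IDT is clear).
         bits 3..15  selector index. */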
4641 uint16_t uExt;
4642 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4643 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4644 {
4645 uExt = 1;
4646 }
4647 else
4648 uExt = 0;
4649
4650 /*
4651 * Push any error code on to the new stack.
4652 */
4653 if (fFlags & IEM_XCPT_FLAGS_ERR)
4654 {
4655 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4656 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4657 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4658
4659 /* Check that there is sufficient space on the stack. */
4660 /** @todo Factor out segment limit checking for normal/expand down segments
4661 * into a separate function. */
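        /* Reminder on the two limit conventions checked below: an expand-up segment
           accepts offsets [0, limit], while an expand-down one accepts
           (limit, 0xffff] or (limit, 0xffffffff] depending on the D/B bit. */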
4662 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4663 {
4664 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4665 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4666 {
4667 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4668 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4669 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4670 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4671 }
4672 }
4673 else
4674 {
4675 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4676 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4677 {
4678 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4679 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4680 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4681 }
4682 }
4683
4684
4685 if (fIsNewTSS386)
4686 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4687 else
4688 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4689 if (rcStrict != VINF_SUCCESS)
4690 {
4691 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4692 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4693 return rcStrict;
4694 }
4695 }
4696
4697 /* Check the new EIP against the new CS limit. */
4698 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4699 {
4700        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4701 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4702 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4703 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4704 }
4705
4706 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.ss.Sel));
4707 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4708}
4709
4710
4711/**
4712 * Implements exceptions and interrupts for protected mode.
4713 *
4714 * @returns VBox strict status code.
4715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4716 * @param cbInstr The number of bytes to offset rIP by in the return
4717 * address.
4718 * @param u8Vector The interrupt / exception vector number.
4719 * @param fFlags The flags.
4720 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4721 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4722 */
4723IEM_STATIC VBOXSTRICTRC
4724iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4725 uint8_t cbInstr,
4726 uint8_t u8Vector,
4727 uint32_t fFlags,
4728 uint16_t uErr,
4729 uint64_t uCr2)
4730{
4731 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4732
4733 /*
4734 * Read the IDT entry.
4735 */
4736 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4737 {
4738 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4739 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4740 }
4741 X86DESC Idte;
4742 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4743 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4744 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4745 {
4746 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4747 return rcStrict;
4748 }
4749 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4750 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4751 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4752
4753 /*
4754 * Check the descriptor type, DPL and such.
4755 * ASSUMES this is done in the same order as described for call-gate calls.
4756 */
4757 if (Idte.Gate.u1DescType)
4758 {
4759 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4760 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4761 }
4762 bool fTaskGate = false;
4763 uint8_t f32BitGate = true;
4764 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4765 switch (Idte.Gate.u4Type)
4766 {
4767 case X86_SEL_TYPE_SYS_UNDEFINED:
4768 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4769 case X86_SEL_TYPE_SYS_LDT:
4770 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4771 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4772 case X86_SEL_TYPE_SYS_UNDEFINED2:
4773 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4774 case X86_SEL_TYPE_SYS_UNDEFINED3:
4775 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4776 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4777 case X86_SEL_TYPE_SYS_UNDEFINED4:
4778 {
4779 /** @todo check what actually happens when the type is wrong...
4780 * esp. call gates. */
4781 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4782 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4783 }
4784
4785 case X86_SEL_TYPE_SYS_286_INT_GATE:
4786 f32BitGate = false;
4787 RT_FALL_THRU();
4788 case X86_SEL_TYPE_SYS_386_INT_GATE:
4789 fEflToClear |= X86_EFL_IF;
4790 break;
4791
4792 case X86_SEL_TYPE_SYS_TASK_GATE:
4793 fTaskGate = true;
4794#ifndef IEM_IMPLEMENTS_TASKSWITCH
4795 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4796#endif
4797 break;
4798
4799 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4800            f32BitGate = false;
                RT_FALL_THRU();
4801 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4802 break;
4803
4804 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4805 }
4806
4807 /* Check DPL against CPL if applicable. */
4808 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4809 {
4810 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4811 {
4812 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4813 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4814 }
4815 }
4816
4817 /* Is it there? */
4818 if (!Idte.Gate.u1Present)
4819 {
4820 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4821 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4822 }
4823
4824 /* Is it a task-gate? */
4825 if (fTaskGate)
4826 {
4827 /*
4828 * Construct the error code masks based on what caused this task switch.
4829 * See Intel Instruction reference for INT.
4830 */
4831 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4832 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4833 RTSEL SelTSS = Idte.Gate.u16Sel;
4834
4835 /*
4836 * Fetch the TSS descriptor in the GDT.
4837 */
4838 IEMSELDESC DescTSS;
4839 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4840 if (rcStrict != VINF_SUCCESS)
4841 {
4842 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4843 VBOXSTRICTRC_VAL(rcStrict)));
4844 return rcStrict;
4845 }
4846
4847 /* The TSS descriptor must be a system segment and be available (not busy). */
4848 if ( DescTSS.Legacy.Gen.u1DescType
4849 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4850 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4851 {
4852 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4853 u8Vector, SelTSS, DescTSS.Legacy.au64));
4854 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4855 }
4856
4857 /* The TSS must be present. */
4858 if (!DescTSS.Legacy.Gen.u1Present)
4859 {
4860 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4861 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4862 }
4863
4864 /* Do the actual task switch. */
4865 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT, pVCpu->cpum.GstCtx.eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4866 }
4867
4868 /* A null CS is bad. */
4869 RTSEL NewCS = Idte.Gate.u16Sel;
4870 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4871 {
4872 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4873 return iemRaiseGeneralProtectionFault0(pVCpu);
4874 }
4875
4876 /* Fetch the descriptor for the new CS. */
4877 IEMSELDESC DescCS;
4878 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4879 if (rcStrict != VINF_SUCCESS)
4880 {
4881 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4882 return rcStrict;
4883 }
4884
4885 /* Must be a code segment. */
4886 if (!DescCS.Legacy.Gen.u1DescType)
4887 {
4888 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4889 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4890 }
4891 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4892 {
4893 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4894 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4895 }
4896
4897 /* Don't allow lowering the privilege level. */
4898 /** @todo Does the lowering of privileges apply to software interrupts
4899 * only? This has bearings on the more-privileged or
4900 * same-privilege stack behavior further down. A testcase would
4901 * be nice. */
4902 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4903 {
4904 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4905 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4906 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4907 }
4908
4909 /* Make sure the selector is present. */
4910 if (!DescCS.Legacy.Gen.u1Present)
4911 {
4912 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4913 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4914 }
4915
4916 /* Check the new EIP against the new CS limit. */
4917 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4918 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4919 ? Idte.Gate.u16OffsetLow
4920 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4921 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4922 if (uNewEip > cbLimitCS)
4923 {
4924 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4925 u8Vector, uNewEip, cbLimitCS, NewCS));
4926 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4927 }
4928 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4929
4930 /* Calc the flag image to push. */
4931 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4932 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4933 fEfl &= ~X86_EFL_RF;
4934 else
4935 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4936
4937 /* From V8086 mode only go to CPL 0. */
4938 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4939 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4940 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4941 {
4942 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4943 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4944 }
4945
4946 /*
4947 * If the privilege level changes, we need to get a new stack from the TSS.
4948     * This in turn means validating the new SS and ESP...
4949 */
4950 if (uNewCpl != pVCpu->iem.s.uCpl)
4951 {
4952 RTSEL NewSS;
4953 uint32_t uNewEsp;
4954 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
4955 if (rcStrict != VINF_SUCCESS)
4956 return rcStrict;
4957
4958 IEMSELDESC DescSS;
4959 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
4960 if (rcStrict != VINF_SUCCESS)
4961 return rcStrict;
4962 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4963 if (!DescSS.Legacy.Gen.u1DefBig)
4964 {
4965 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4966 uNewEsp = (uint16_t)uNewEsp;
4967 }
4968
4969 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4970
4971 /* Check that there is sufficient space for the stack frame. */
4972 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4973 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4974 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4975 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
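        /* I.e. 5 slots (EIP, CS, EFLAGS, ESP, SS) plus an optional error code, or
           9/10 slots when the V86 ES, DS, FS and GS are saved as well; the byte
           counts are for 16-bit slots and << f32BitGate doubles them for a 32-bit gate. */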
4976
4977 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4978 {
4979 if ( uNewEsp - 1 > cbLimitSS
4980 || uNewEsp < cbStackFrame)
4981 {
4982 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4983 u8Vector, NewSS, uNewEsp, cbStackFrame));
4984 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4985 }
4986 }
4987 else
4988 {
4989 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4990 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4991 {
4992 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4993 u8Vector, NewSS, uNewEsp, cbStackFrame));
4994 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4995 }
4996 }
4997
4998 /*
4999 * Start making changes.
5000 */
5001
5002 /* Set the new CPL so that stack accesses use it. */
5003 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5004 pVCpu->iem.s.uCpl = uNewCpl;
5005
5006 /* Create the stack frame. */
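        /* The frame written below, lowest address first (this mirrors the stores
           below; each slot is 4 bytes for a 32-bit gate, 2 bytes for a 16-bit one):
               [error code]        - only if IEM_XCPT_FLAGS_ERR
               EIP, CS, EFLAGS     - return frame
               old ESP, old SS     - outer stack pointer
               ES, DS, FS, GS      - only when interrupting V86 code */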
5007 RTPTRUNION uStackFrame;
5008 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5009 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5010 if (rcStrict != VINF_SUCCESS)
5011 return rcStrict;
5012 void * const pvStackFrame = uStackFrame.pv;
5013 if (f32BitGate)
5014 {
5015 if (fFlags & IEM_XCPT_FLAGS_ERR)
5016 *uStackFrame.pu32++ = uErr;
5017 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5018 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5019 uStackFrame.pu32[2] = fEfl;
5020 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5021 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5022 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5023 if (fEfl & X86_EFL_VM)
5024 {
5025 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5026 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5027 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5028 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5029 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5030 }
5031 }
5032 else
5033 {
5034 if (fFlags & IEM_XCPT_FLAGS_ERR)
5035 *uStackFrame.pu16++ = uErr;
5036 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5037 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5038 uStackFrame.pu16[2] = fEfl;
5039 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5040 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5041 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5042 if (fEfl & X86_EFL_VM)
5043 {
5044 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5045 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5046 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5047 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5048 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5049 }
5050 }
5051 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5052 if (rcStrict != VINF_SUCCESS)
5053 return rcStrict;
5054
5055 /* Mark the selectors 'accessed' (hope this is the correct time). */
5056        /** @todo testcase: exactly _when_ are the accessed bits set - before or
5057 * after pushing the stack frame? (Write protect the gdt + stack to
5058 * find out.) */
5059 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5060 {
5061 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5062 if (rcStrict != VINF_SUCCESS)
5063 return rcStrict;
5064 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5065 }
5066
5067 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5068 {
5069 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5070 if (rcStrict != VINF_SUCCESS)
5071 return rcStrict;
5072 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5073 }
5074
5075 /*
5076         * Start committing the register changes (joins with the DPL=CPL branch).
5077 */
5078 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5079 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5080 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5081 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5082 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5083 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5084 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5085 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5086 * SP is loaded).
5087 * Need to check the other combinations too:
5088 * - 16-bit TSS, 32-bit handler
5089 * - 32-bit TSS, 16-bit handler */
5090 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5091 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5092 else
5093 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5094
5095 if (fEfl & X86_EFL_VM)
5096 {
5097 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5098 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5099 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5100 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5101 }
5102 }
5103 /*
5104 * Same privilege, no stack change and smaller stack frame.
5105 */
5106 else
5107 {
5108 uint64_t uNewRsp;
5109 RTPTRUNION uStackFrame;
5110 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
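        /* I.e. 3 slots (EIP, CS, EFLAGS) plus an optional error code; 2 bytes per
           slot for a 16-bit gate, doubled by << f32BitGate for a 32-bit gate. */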
5111 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5112 if (rcStrict != VINF_SUCCESS)
5113 return rcStrict;
5114 void * const pvStackFrame = uStackFrame.pv;
5115
5116 if (f32BitGate)
5117 {
5118 if (fFlags & IEM_XCPT_FLAGS_ERR)
5119 *uStackFrame.pu32++ = uErr;
5120 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5121 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5122 uStackFrame.pu32[2] = fEfl;
5123 }
5124 else
5125 {
5126 if (fFlags & IEM_XCPT_FLAGS_ERR)
5127 *uStackFrame.pu16++ = uErr;
5128 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5129 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5130 uStackFrame.pu16[2] = fEfl;
5131 }
5132 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5133 if (rcStrict != VINF_SUCCESS)
5134 return rcStrict;
5135
5136 /* Mark the CS selector as 'accessed'. */
5137 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5138 {
5139 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5140 if (rcStrict != VINF_SUCCESS)
5141 return rcStrict;
5142 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5143 }
5144
5145 /*
5146 * Start committing the register changes (joins with the other branch).
5147 */
5148 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5149 }
5150
5151 /* ... register committing continues. */
5152 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5153 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5154 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5155 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5156 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5157 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5158
5159 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5160 fEfl &= ~fEflToClear;
5161 IEMMISC_SET_EFL(pVCpu, fEfl);
5162
5163 if (fFlags & IEM_XCPT_FLAGS_CR2)
5164 pVCpu->cpum.GstCtx.cr2 = uCr2;
5165
5166 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5167 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5168
5169 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5170}
5171
5172
5173/**
5174 * Implements exceptions and interrupts for long mode.
5175 *
5176 * @returns VBox strict status code.
5177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5178 * @param cbInstr The number of bytes to offset rIP by in the return
5179 * address.
5180 * @param u8Vector The interrupt / exception vector number.
5181 * @param fFlags The flags.
5182 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5183 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5184 */
5185IEM_STATIC VBOXSTRICTRC
5186iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5187 uint8_t cbInstr,
5188 uint8_t u8Vector,
5189 uint32_t fFlags,
5190 uint16_t uErr,
5191 uint64_t uCr2)
5192{
5193 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5194
5195 /*
5196 * Read the IDT entry.
5197 */
5198 uint16_t offIdt = (uint16_t)u8Vector << 4;
5199 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5200 {
5201 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5202 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5203 }
5204 X86DESC64 Idte;
5205 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5206 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5207 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5208 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5209 {
5210 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5211 return rcStrict;
5212 }
5213 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5214 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5215 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5216
5217 /*
5218 * Check the descriptor type, DPL and such.
5219 * ASSUMES this is done in the same order as described for call-gate calls.
5220 */
5221 if (Idte.Gate.u1DescType)
5222 {
5223 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5224 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5225 }
5226 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5227 switch (Idte.Gate.u4Type)
5228 {
5229 case AMD64_SEL_TYPE_SYS_INT_GATE:
5230 fEflToClear |= X86_EFL_IF;
5231 break;
5232 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5233 break;
5234
5235 default:
5236 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5237 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5238 }
5239
5240 /* Check DPL against CPL if applicable. */
5241 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5242 {
5243 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5244 {
5245 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5246 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5247 }
5248 }
5249
5250 /* Is it there? */
5251 if (!Idte.Gate.u1Present)
5252 {
5253 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5254 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5255 }
5256
5257 /* A null CS is bad. */
5258 RTSEL NewCS = Idte.Gate.u16Sel;
5259 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5260 {
5261 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5262 return iemRaiseGeneralProtectionFault0(pVCpu);
5263 }
5264
5265 /* Fetch the descriptor for the new CS. */
5266 IEMSELDESC DescCS;
5267 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5268 if (rcStrict != VINF_SUCCESS)
5269 {
5270 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5271 return rcStrict;
5272 }
5273
5274 /* Must be a 64-bit code segment. */
5275 if (!DescCS.Long.Gen.u1DescType)
5276 {
5277 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5278 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5279 }
5280 if ( !DescCS.Long.Gen.u1Long
5281 || DescCS.Long.Gen.u1DefBig
5282 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5283 {
5284 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5285 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5286 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5287 }
5288
5289 /* Don't allow lowering the privilege level. For non-conforming CS
5290 selectors, the CS.DPL sets the privilege level the trap/interrupt
5291 handler runs at. For conforming CS selectors, the CPL remains
5292 unchanged, but the CS.DPL must be <= CPL. */
5293 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5294 * when CPU in Ring-0. Result \#GP? */
5295 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5296 {
5297 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5298 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5299 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5300 }
5301
5302
5303 /* Make sure the selector is present. */
5304 if (!DescCS.Legacy.Gen.u1Present)
5305 {
5306 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5307 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5308 }
5309
5310 /* Check that the new RIP is canonical. */
5311 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5312 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5313 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5314 if (!IEM_IS_CANONICAL(uNewRip))
5315 {
5316 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5317 return iemRaiseGeneralProtectionFault0(pVCpu);
5318 }
5319
5320 /*
5321 * If the privilege level changes or if the IST isn't zero, we need to get
5322 * a new stack from the TSS.
5323 */
5324 uint64_t uNewRsp;
5325 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5326 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5327 if ( uNewCpl != pVCpu->iem.s.uCpl
5328 || Idte.Gate.u3IST != 0)
5329 {
5330 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5331 if (rcStrict != VINF_SUCCESS)
5332 return rcStrict;
5333 }
5334 else
5335 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5336 uNewRsp &= ~(uint64_t)0xf;
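    /* In long mode the CPU aligns the new RSP down to a 16-byte boundary before
       pushing the interrupt/exception frame. */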
5337
5338 /*
5339 * Calc the flag image to push.
5340 */
5341 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5342 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5343 fEfl &= ~X86_EFL_RF;
5344 else
5345 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5346
5347 /*
5348 * Start making changes.
5349 */
5350 /* Set the new CPL so that stack accesses use it. */
5351 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5352 pVCpu->iem.s.uCpl = uNewCpl;
5353
5354 /* Create the stack frame. */
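    /* The frame written below, lowest address first (mirrors the stores below;
       all slots are 8 bytes wide):
           [error code]  - only if IEM_XCPT_FLAGS_ERR
           RIP, CS, RFLAGS, old RSP, old SS */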
5355 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5356 RTPTRUNION uStackFrame;
5357 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5358 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5359 if (rcStrict != VINF_SUCCESS)
5360 return rcStrict;
5361 void * const pvStackFrame = uStackFrame.pv;
5362
5363 if (fFlags & IEM_XCPT_FLAGS_ERR)
5364 *uStackFrame.pu64++ = uErr;
5365 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5366 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5367 uStackFrame.pu64[2] = fEfl;
5368 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5369 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5370 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5371 if (rcStrict != VINF_SUCCESS)
5372 return rcStrict;
5373
5374    /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5375    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5376 * after pushing the stack frame? (Write protect the gdt + stack to
5377 * find out.) */
5378 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5379 {
5380 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5381 if (rcStrict != VINF_SUCCESS)
5382 return rcStrict;
5383 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5384 }
5385
5386 /*
5387     * Start committing the register changes.
5388 */
5389 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5390 * hidden registers when interrupting 32-bit or 16-bit code! */
5391 if (uNewCpl != uOldCpl)
5392 {
5393 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5394 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5395 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5396 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5397 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5398 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5399 }
5400 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5401 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5402 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5403 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5404 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5405 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5406 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5407 pVCpu->cpum.GstCtx.rip = uNewRip;
5408
5409 fEfl &= ~fEflToClear;
5410 IEMMISC_SET_EFL(pVCpu, fEfl);
5411
5412 if (fFlags & IEM_XCPT_FLAGS_CR2)
5413 pVCpu->cpum.GstCtx.cr2 = uCr2;
5414
5415 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5416 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5417
5418 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5419}
5420
5421
5422/**
5423 * Implements exceptions and interrupts.
5424 *
5425 * All exceptions and interrupts go through this function!
5426 *
5427 * @returns VBox strict status code.
5428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5429 * @param cbInstr The number of bytes to offset rIP by in the return
5430 * address.
5431 * @param u8Vector The interrupt / exception vector number.
5432 * @param fFlags The flags.
5433 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5434 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5435 */
5436DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5437iemRaiseXcptOrInt(PVMCPU pVCpu,
5438 uint8_t cbInstr,
5439 uint8_t u8Vector,
5440 uint32_t fFlags,
5441 uint16_t uErr,
5442 uint64_t uCr2)
5443{
5444 /*
5445 * Get all the state that we might need here.
5446 */
5447 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5448 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5449
5450#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5451 /*
5452 * Flush prefetch buffer
5453 */
5454 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5455#endif
5456
5457 /*
5458 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5459 */
5460 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5461 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5462 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5463 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5464 {
5465 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5466 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5467 u8Vector = X86_XCPT_GP;
5468 uErr = 0;
5469 }
5470#ifdef DBGFTRACE_ENABLED
5471 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5472 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5473 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5474#endif
5475
5476#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5477 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5478 {
5479 /*
5480 * If the event is being injected as part of VMRUN, it isn't subject to event
5481 * intercepts in the nested-guest. However, secondary exceptions that occur
5482 * during injection of any event -are- subject to exception intercepts.
5483 * See AMD spec. 15.20 "Event Injection".
5484 */
5485 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5486 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = 1;
5487 else
5488 {
5489 /*
5490 * Check and handle if the event being raised is intercepted.
5491 */
5492 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5493 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5494 return rcStrict0;
5495 }
5496 }
5497#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5498
5499 /*
5500 * Do recursion accounting.
5501 */
5502 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5503 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5504 if (pVCpu->iem.s.cXcptRecursions == 0)
5505 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5506 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5507 else
5508 {
5509 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5510 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5511 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5512
5513 if (pVCpu->iem.s.cXcptRecursions >= 4)
5514 {
5515#ifdef DEBUG_bird
5516 AssertFailed();
5517#endif
5518 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5519 }
5520
5521 /*
5522 * Evaluate the sequence of recurring events.
5523 */
5524 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5525 NULL /* pXcptRaiseInfo */);
5526 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5527 { /* likely */ }
5528 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5529 {
5530 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5531 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5532 u8Vector = X86_XCPT_DF;
5533 uErr = 0;
5534 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5535 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5536 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5537 }
5538 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5539 {
5540 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5541 return iemInitiateCpuShutdown(pVCpu);
5542 }
5543 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5544 {
5545 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5546 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5547 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5548 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5549 return VERR_EM_GUEST_CPU_HANG;
5550 }
5551 else
5552 {
5553 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5554 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5555 return VERR_IEM_IPE_9;
5556 }
5557
5558 /*
5559         * The 'EXT' bit is set when an exception occurs during delivery of an external
5560         * event (such as an interrupt or an earlier exception)[1]. The privileged software
5561         * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5562         * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5563 *
5564 * [1] - Intel spec. 6.13 "Error Code"
5565 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5566 * [3] - Intel Instruction reference for INT n.
5567 */
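        /* Illustrative example: if an external hardware interrupt's IDT entry turns out
           to be bad, the #GP raised above already carries
           (vector << X86_TRAP_ERR_SEL_SHIFT) | X86_TRAP_ERR_IDT as its error code, and
           X86_TRAP_ERR_EXTERNAL is OR'ed into it here. */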
5568 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5569 && (fFlags & IEM_XCPT_FLAGS_ERR)
5570 && u8Vector != X86_XCPT_PF
5571 && u8Vector != X86_XCPT_DF)
5572 {
5573 uErr |= X86_TRAP_ERR_EXTERNAL;
5574 }
5575 }
5576
5577 pVCpu->iem.s.cXcptRecursions++;
5578 pVCpu->iem.s.uCurXcpt = u8Vector;
5579 pVCpu->iem.s.fCurXcpt = fFlags;
5580 pVCpu->iem.s.uCurXcptErr = uErr;
5581 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5582
5583 /*
5584 * Extensive logging.
5585 */
5586#if defined(LOG_ENABLED) && defined(IN_RING3)
5587 if (LogIs3Enabled())
5588 {
5589 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5590 PVM pVM = pVCpu->CTX_SUFF(pVM);
5591 char szRegs[4096];
5592 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5593 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5594 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5595 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5596 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5597 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5598 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5599 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5600 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5601 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5602 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5603 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5604 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5605 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5606 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5607 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5608 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5609 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5610 " efer=%016VR{efer}\n"
5611 " pat=%016VR{pat}\n"
5612 " sf_mask=%016VR{sf_mask}\n"
5613 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5614 " lstar=%016VR{lstar}\n"
5615 " star=%016VR{star} cstar=%016VR{cstar}\n"
5616 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5617 );
5618
5619 char szInstr[256];
5620 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5621 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5622 szInstr, sizeof(szInstr), NULL);
5623 Log3(("%s%s\n", szRegs, szInstr));
5624 }
5625#endif /* LOG_ENABLED */
5626
5627 /*
5628 * Call the mode specific worker function.
5629 */
5630 VBOXSTRICTRC rcStrict;
5631 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5632 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5633 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5634 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5635 else
5636 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5637
5638 /* Flush the prefetch buffer. */
5639#ifdef IEM_WITH_CODE_TLB
5640 pVCpu->iem.s.pbInstrBuf = NULL;
5641#else
5642 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5643#endif
5644
5645 /*
5646 * Unwind.
5647 */
5648 pVCpu->iem.s.cXcptRecursions--;
5649 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5650 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5651 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5652 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5653 pVCpu->iem.s.cXcptRecursions + 1));
5654 return rcStrict;
5655}
5656
5657#ifdef IEM_WITH_SETJMP
5658/**
5659 * See iemRaiseXcptOrInt. Will not return.
5660 */
5661IEM_STATIC DECL_NO_RETURN(void)
5662iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5663 uint8_t cbInstr,
5664 uint8_t u8Vector,
5665 uint32_t fFlags,
5666 uint16_t uErr,
5667 uint64_t uCr2)
5668{
5669 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5670 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5671}
5672#endif
5673
5674
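/*
 * The iemRaiseXxx convenience wrappers below all funnel into iemRaiseXcptOrInt.
 * Illustrative (hypothetical) call site in an instruction implementation:
 *
 *     if (pVCpu->iem.s.uCpl != 0)                  // privileged instruction check
 *         return iemRaiseGeneralProtectionFault0(pVCpu);
 */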
5675/** \#DE - 00. */
5676DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5677{
5678 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5679}
5680
5681
5682/** \#DB - 01.
5683 * @note This automatically clears DR7.GD. */
5684DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5685{
5686 /** @todo set/clear RF. */
5687 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5688 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5689}
5690
5691
5692/** \#BR - 05. */
5693DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5694{
5695 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5696}
5697
5698
5699/** \#UD - 06. */
5700DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5701{
5702 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5703}
5704
5705
5706/** \#NM - 07. */
5707DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5708{
5709 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5710}
5711
5712
5713/** \#TS(err) - 0a. */
5714DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5715{
5716 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5717}
5718
5719
5720/** \#TS(tr) - 0a. */
5721DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5722{
5723 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5724 pVCpu->cpum.GstCtx.tr.Sel, 0);
5725}
5726
5727
5728/** \#TS(0) - 0a. */
5729DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5730{
5731 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5732 0, 0);
5733}
5734
5735
5736/** \#TS(sel) - 0a. */
5737DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5738{
5739 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5740 uSel & X86_SEL_MASK_OFF_RPL, 0);
5741}
5742
5743
5744/** \#NP(err) - 0b. */
5745DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5746{
5747 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5748}
5749
5750
5751/** \#NP(sel) - 0b. */
5752DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5753{
5754 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5755 uSel & ~X86_SEL_RPL, 0);
5756}
5757
5758
5759/** \#SS(seg) - 0c. */
5760DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5761{
5762 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5763 uSel & ~X86_SEL_RPL, 0);
5764}
5765
5766
5767/** \#SS(err) - 0c. */
5768DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5769{
5770 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5771}
5772
5773
5774/** \#GP(n) - 0d. */
5775DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5776{
5777 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5778}
5779
5780
5781/** \#GP(0) - 0d. */
5782DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5783{
5784 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5785}
5786
5787#ifdef IEM_WITH_SETJMP
5788/** \#GP(0) - 0d. */
5789DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5790{
5791 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5792}
5793#endif
5794
5795
5796/** \#GP(sel) - 0d. */
5797DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5798{
5799 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5800 Sel & ~X86_SEL_RPL, 0);
5801}
5802
5803
5804/** \#GP(0) - 0d. */
5805DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5806{
5807 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5808}
5809
5810
5811/** \#GP(sel) - 0d. */
5812DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5813{
5814 NOREF(iSegReg); NOREF(fAccess);
5815 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5816 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5817}
5818
5819#ifdef IEM_WITH_SETJMP
5820/** \#GP(sel) - 0d, longjmp. */
5821DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5822{
5823 NOREF(iSegReg); NOREF(fAccess);
5824 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5825 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5826}
5827#endif
5828
5829/** \#GP(sel) - 0d. */
5830DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5831{
5832 NOREF(Sel);
5833 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5834}
5835
5836#ifdef IEM_WITH_SETJMP
5837/** \#GP(sel) - 0d, longjmp. */
5838DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5839{
5840 NOREF(Sel);
5841 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5842}
5843#endif
5844
5845
5846/** \#GP(sel) - 0d. */
5847DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5848{
5849 NOREF(iSegReg); NOREF(fAccess);
5850 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5851}
5852
5853#ifdef IEM_WITH_SETJMP
5854/** \#GP(sel) - 0d, longjmp. */
5855DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5856 uint32_t fAccess)
5857{
5858 NOREF(iSegReg); NOREF(fAccess);
5859 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5860}
5861#endif
5862
5863
5864/** \#PF(n) - 0e. */
5865DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5866{
5867 uint16_t uErr;
5868 switch (rc)
5869 {
5870 case VERR_PAGE_NOT_PRESENT:
5871 case VERR_PAGE_TABLE_NOT_PRESENT:
5872 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5873 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5874 uErr = 0;
5875 break;
5876
5877 default:
5878 AssertMsgFailed(("%Rrc\n", rc));
5879 RT_FALL_THRU();
5880 case VERR_ACCESS_DENIED:
5881 uErr = X86_TRAP_PF_P;
5882 break;
5883
5884 /** @todo reserved */
5885 }
5886
5887 if (pVCpu->iem.s.uCpl == 3)
5888 uErr |= X86_TRAP_PF_US;
5889
5890 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5891 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5892 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5893 uErr |= X86_TRAP_PF_ID;
5894
5895#if 0 /* This is so much non-sense, really. Why was it done like that? */
5896 /* Note! RW access callers reporting a WRITE protection fault, will clear
5897 the READ flag before calling. So, read-modify-write accesses (RW)
5898 can safely be reported as READ faults. */
5899 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5900 uErr |= X86_TRAP_PF_RW;
5901#else
5902 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5903 {
5904 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5905 uErr |= X86_TRAP_PF_RW;
5906 }
5907#endif
5908
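    /* Summary of the error code bits assembled above: P (set for protection
       violations, clear for not-present pages), US (CPL 3 access), ID (instruction
       fetch with NX enabled) and RW (write access); CR2 receives GCPtrWhere below. */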
5909 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5910 uErr, GCPtrWhere);
5911}
5912
5913#ifdef IEM_WITH_SETJMP
5914/** \#PF(n) - 0e, longjmp. */
5915IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5916{
5917 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5918}
5919#endif
5920
5921
5922/** \#MF(0) - 10. */
5923DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5924{
5925 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5926}
5927
5928
5929/** \#AC(0) - 11. */
5930DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5931{
5932 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5933}
5934
5935
5936/**
5937 * Macro for calling iemCImplRaiseDivideError().
5938 *
5939 * This enables us to add/remove arguments and force different levels of
5940 * inlining as we wish.
5941 *
5942 * @return Strict VBox status code.
5943 */
5944#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5945IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5946{
5947 NOREF(cbInstr);
5948 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5949}
5950
5951
5952/**
5953 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5954 *
5955 * This enables us to add/remove arguments and force different levels of
5956 * inlining as we wish.
5957 *
5958 * @return Strict VBox status code.
5959 */
5960#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5961IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5962{
5963 NOREF(cbInstr);
5964 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5965}
5966
5967
5968/**
5969 * Macro for calling iemCImplRaiseInvalidOpcode().
5970 *
5971 * This enables us to add/remove arguments and force different levels of
5972 * inlining as we wish.
5973 *
5974 * @return Strict VBox status code.
5975 */
5976#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5977IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5978{
5979 NOREF(cbInstr);
5980 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5981}
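/*
 * Illustrative (hypothetical) decoder-side usage of the IEMOP_RAISE_* macros above;
 * the real call sites live in the IEMAllInstructions*.cpp.h files, not here:
 *
 *     FNIEMOP_DEF(iemOp_ExampleOnly)                       // hypothetical opcode handler
 *     {
 *         if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)    // e.g. invalid in 64-bit mode
 *             return IEMOP_RAISE_INVALID_OPCODE();
 *         ...
 *     }
 */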
5982
5983
5984/** @} */
5985
5986
5987/*
5988 *
5989 * Helper routines.
5990 * Helper routines.
5991 * Helper routines.
5992 *
5993 */
5994
5995/**
5996 * Recalculates the effective operand size.
5997 *
5998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5999 */
6000IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6001{
6002 switch (pVCpu->iem.s.enmCpuMode)
6003 {
6004 case IEMMODE_16BIT:
6005 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6006 break;
6007 case IEMMODE_32BIT:
6008 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6009 break;
6010 case IEMMODE_64BIT:
6011 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6012 {
6013 case 0:
6014 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6015 break;
6016 case IEM_OP_PRF_SIZE_OP:
6017 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6018 break;
6019 case IEM_OP_PRF_SIZE_REX_W:
6020 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6021 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6022 break;
6023 }
6024 break;
6025 default:
6026 AssertFailed();
6027 }
6028}
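/* Quick reference for the mapping implemented above (derived from the code, not a
   normative table):
     CPU mode   prefixes                 effective operand size
     16-bit     none                     16-bit
     16-bit     0x66                     32-bit
     32-bit     none                     32-bit
     32-bit     0x66                     16-bit
     64-bit     none                     default size (usually 32-bit)
     64-bit     0x66                     16-bit
     64-bit     REX.W (w/ or w/o 0x66)   64-bit
 */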
6029
6030
6031/**
6032 * Sets the default operand size to 64-bit and recalculates the effective
6033 * operand size.
6034 *
6035 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6036 */
6037IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6038{
6039 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6040 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6041 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6042 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6043 else
6044 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6045}
6046
6047
6048/*
6049 *
6050 * Common opcode decoders.
6051 * Common opcode decoders.
6052 * Common opcode decoders.
6053 *
6054 */
6055//#include <iprt/mem.h>
6056
6057/**
6058 * Used to add extra details about a stub case.
6059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6060 */
6061IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6062{
6063#if defined(LOG_ENABLED) && defined(IN_RING3)
6064 PVM pVM = pVCpu->CTX_SUFF(pVM);
6065 char szRegs[4096];
6066 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6067 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6068 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6069 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6070 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6071 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6072 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6073 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6074 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6075 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6076 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6077 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6078 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6079 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6080 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6081 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6082 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6083 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6084 " efer=%016VR{efer}\n"
6085 " pat=%016VR{pat}\n"
6086 " sf_mask=%016VR{sf_mask}\n"
6087 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6088 " lstar=%016VR{lstar}\n"
6089 " star=%016VR{star} cstar=%016VR{cstar}\n"
6090 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6091 );
6092
6093 char szInstr[256];
6094 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6095 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6096 szInstr, sizeof(szInstr), NULL);
6097
6098 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6099#else
6100 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6101#endif
6102}
6103
6104/**
6105 * Complains about a stub.
6106 *
6107 * Two versions of this macro are provided: one for daily use and one for use
6108 * when working on IEM.
6109 */
6110#if 0
6111# define IEMOP_BITCH_ABOUT_STUB() \
6112 do { \
6113 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6114 iemOpStubMsg2(pVCpu); \
6115 RTAssertPanic(); \
6116 } while (0)
6117#else
6118# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6119#endif
6120
6121/** Stubs an opcode. */
6122#define FNIEMOP_STUB(a_Name) \
6123 FNIEMOP_DEF(a_Name) \
6124 { \
6125 RT_NOREF_PV(pVCpu); \
6126 IEMOP_BITCH_ABOUT_STUB(); \
6127 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6128 } \
6129 typedef int ignore_semicolon
6130
6131/** Stubs an opcode. */
6132#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6133 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6134 { \
6135 RT_NOREF_PV(pVCpu); \
6136 RT_NOREF_PV(a_Name0); \
6137 IEMOP_BITCH_ABOUT_STUB(); \
6138 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6139 } \
6140 typedef int ignore_semicolon
6141
6142/** Stubs an opcode which currently should raise \#UD. */
6143#define FNIEMOP_UD_STUB(a_Name) \
6144 FNIEMOP_DEF(a_Name) \
6145 { \
6146 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6147 return IEMOP_RAISE_INVALID_OPCODE(); \
6148 } \
6149 typedef int ignore_semicolon
6150
6151/** Stubs an opcode which currently should raise \#UD. */
6152#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6153 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6154 { \
6155 RT_NOREF_PV(pVCpu); \
6156 RT_NOREF_PV(a_Name0); \
6157 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6158 return IEMOP_RAISE_INVALID_OPCODE(); \
6159 } \
6160 typedef int ignore_semicolon
6161
6162
6163
6164/** @name Register Access.
6165 * @{
6166 */
6167
6168/**
6169 * Gets a reference (pointer) to the specified hidden segment register.
6170 *
6171 * @returns Hidden register reference.
6172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6173 * @param iSegReg The segment register.
6174 */
6175IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6176{
6177 Assert(iSegReg < X86_SREG_COUNT);
6178 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6179 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6180
6181#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6182 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6183 { /* likely */ }
6184 else
6185 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6186#else
6187 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6188#endif
6189 return pSReg;
6190}
6191
6192
6193/**
6194 * Ensures that the given hidden segment register is up to date.
6195 *
6196 * @returns Hidden register reference.
6197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6198 * @param pSReg The segment register.
6199 */
6200IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6201{
6202#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6203 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6204 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6205#else
6206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6207 NOREF(pVCpu);
6208#endif
6209 return pSReg;
6210}
6211
6212
6213/**
6214 * Gets a reference (pointer) to the specified segment register (the selector
6215 * value).
6216 *
6217 * @returns Pointer to the selector variable.
6218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6219 * @param iSegReg The segment register.
6220 */
6221DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6222{
6223 Assert(iSegReg < X86_SREG_COUNT);
6224 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6225 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6226}
6227
6228
6229/**
6230 * Fetches the selector value of a segment register.
6231 *
6232 * @returns The selector value.
6233 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6234 * @param iSegReg The segment register.
6235 */
6236DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6237{
6238 Assert(iSegReg < X86_SREG_COUNT);
6239 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6240 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6241}
6242
6243
6244/**
6245 * Fetches the base address value of a segment register.
6246 *
6247 * @returns The segment base address.
6248 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6249 * @param iSegReg The segment register.
6250 */
6251DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6252{
6253 Assert(iSegReg < X86_SREG_COUNT);
6254 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6255 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6256}
6257
6258
6259/**
6260 * Gets a reference (pointer) to the specified general purpose register.
6261 *
6262 * @returns Register reference.
6263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6264 * @param iReg The general purpose register.
6265 */
6266DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6267{
6268 Assert(iReg < 16);
6269 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6270}
6271
6272
6273/**
6274 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6275 *
6276 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6277 *
6278 * @returns Register reference.
6279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6280 * @param iReg The register.
6281 */
6282DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6283{
6284 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6285 {
6286 Assert(iReg < 16);
6287 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6288 }
6289 /* high 8-bit register. */
6290 Assert(iReg < 8);
6291 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6292}
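/* Example of the AH/CH/DH/BH quirk handled above (sketch): for encoded
   register number 4, an instruction carrying any REX prefix gets SPL, i.e.
   &aGRegs[4].u8, while the same encoding without a REX prefix gets AH, i.e.
   &aGRegs[0].bHi.  Numbers 0..3 (AL/CL/DL/BL) resolve the same way either
   way. */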
6293
6294
6295/**
6296 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6297 *
6298 * @returns Register reference.
6299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6300 * @param iReg The register.
6301 */
6302DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6303{
6304 Assert(iReg < 16);
6305 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6306}
6307
6308
6309/**
6310 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6311 *
6312 * @returns Register reference.
6313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6314 * @param iReg The register.
6315 */
6316DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6317{
6318 Assert(iReg < 16);
6319 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6320}
6321
6322
6323/**
6324 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6325 *
6326 * @returns Register reference.
6327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6328 * @param iReg The register.
6329 */
6330DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6331{
6332 Assert(iReg < 16);
6333 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6334}
6335
6336
6337/**
6338 * Gets a reference (pointer) to the specified segment register's base address.
6339 *
6340 * @returns Segment register base address reference.
6341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6342 * @param iSegReg The segment selector.
6343 */
6344DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6345{
6346 Assert(iSegReg < X86_SREG_COUNT);
6347 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6348 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6349}
6350
6351
6352/**
6353 * Fetches the value of an 8-bit general purpose register.
6354 *
6355 * @returns The register value.
6356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6357 * @param iReg The register.
6358 */
6359DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6360{
6361 return *iemGRegRefU8(pVCpu, iReg);
6362}
6363
6364
6365/**
6366 * Fetches the value of a 16-bit general purpose register.
6367 *
6368 * @returns The register value.
6369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6370 * @param iReg The register.
6371 */
6372DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6373{
6374 Assert(iReg < 16);
6375 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6376}
6377
6378
6379/**
6380 * Fetches the value of a 32-bit general purpose register.
6381 *
6382 * @returns The register value.
6383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6384 * @param iReg The register.
6385 */
6386DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6387{
6388 Assert(iReg < 16);
6389 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6390}
6391
6392
6393/**
6394 * Fetches the value of a 64-bit general purpose register.
6395 *
6396 * @returns The register value.
6397 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6398 * @param iReg The register.
6399 */
6400DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6401{
6402 Assert(iReg < 16);
6403 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6404}
6405
6406
6407/**
6408 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6409 *
6410 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6411 * segment limit.
6412 *
6413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6414 * @param offNextInstr The offset of the next instruction.
6415 */
6416IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6417{
6418 switch (pVCpu->iem.s.enmEffOpSize)
6419 {
6420 case IEMMODE_16BIT:
6421 {
6422 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6423 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6424 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6425 return iemRaiseGeneralProtectionFault0(pVCpu);
6426 pVCpu->cpum.GstCtx.rip = uNewIp;
6427 break;
6428 }
6429
6430 case IEMMODE_32BIT:
6431 {
6432 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6433 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6434
6435 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6436 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6437 return iemRaiseGeneralProtectionFault0(pVCpu);
6438 pVCpu->cpum.GstCtx.rip = uNewEip;
6439 break;
6440 }
6441
6442 case IEMMODE_64BIT:
6443 {
6444 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6445
6446 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6447 if (!IEM_IS_CANONICAL(uNewRip))
6448 return iemRaiseGeneralProtectionFault0(pVCpu);
6449 pVCpu->cpum.GstCtx.rip = uNewRip;
6450 break;
6451 }
6452
6453 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6454 }
6455
6456 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6457
6458#ifndef IEM_WITH_CODE_TLB
6459 /* Flush the prefetch buffer. */
6460 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6461#endif
6462
6463 return VINF_SUCCESS;
6464}
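/* Worked example (sketch): in 16-bit code with IP=0xFFF0, a 2-byte short jump
   with displacement +0x20 gives uNewIp = 0xFFF0 + 0x20 + 2 = 0x0012 after the
   uint16_t truncation above, and the wrapped value is then checked against the
   CS limit.  In 64-bit mode the addition is done on the full RIP and only the
   canonical check applies. */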
6465
6466
6467/**
6468 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6469 *
6470 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6471 * segment limit.
6472 *
6473 * @returns Strict VBox status code.
6474 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6475 * @param offNextInstr The offset of the next instruction.
6476 */
6477IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6478{
6479 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6480
6481 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6482 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6483 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6484 return iemRaiseGeneralProtectionFault0(pVCpu);
6485 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6486 pVCpu->cpum.GstCtx.rip = uNewIp;
6487 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6488
6489#ifndef IEM_WITH_CODE_TLB
6490 /* Flush the prefetch buffer. */
6491 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6492#endif
6493
6494 return VINF_SUCCESS;
6495}
6496
6497
6498/**
6499 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6500 *
6501 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6502 * segment limit.
6503 *
6504 * @returns Strict VBox status code.
6505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6506 * @param offNextInstr The offset of the next instruction.
6507 */
6508IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6509{
6510 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6511
6512 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6513 {
6514 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6515
6516 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6517 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6518 return iemRaiseGeneralProtectionFault0(pVCpu);
6519 pVCpu->cpum.GstCtx.rip = uNewEip;
6520 }
6521 else
6522 {
6523 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6524
6525 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6526 if (!IEM_IS_CANONICAL(uNewRip))
6527 return iemRaiseGeneralProtectionFault0(pVCpu);
6528 pVCpu->cpum.GstCtx.rip = uNewRip;
6529 }
6530 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6531
6532#ifndef IEM_WITH_CODE_TLB
6533 /* Flush the prefetch buffer. */
6534 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6535#endif
6536
6537 return VINF_SUCCESS;
6538}
6539
6540
6541/**
6542 * Performs a near jump to the specified address.
6543 *
6544 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6545 * segment limit.
6546 *
6547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6548 * @param uNewRip The new RIP value.
6549 */
6550IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6551{
6552 switch (pVCpu->iem.s.enmEffOpSize)
6553 {
6554 case IEMMODE_16BIT:
6555 {
6556 Assert(uNewRip <= UINT16_MAX);
6557 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6558 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6559 return iemRaiseGeneralProtectionFault0(pVCpu);
6560 /** @todo Test 16-bit jump in 64-bit mode. */
6561 pVCpu->cpum.GstCtx.rip = uNewRip;
6562 break;
6563 }
6564
6565 case IEMMODE_32BIT:
6566 {
6567 Assert(uNewRip <= UINT32_MAX);
6568 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6569 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6570
6571 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6572 return iemRaiseGeneralProtectionFault0(pVCpu);
6573 pVCpu->cpum.GstCtx.rip = uNewRip;
6574 break;
6575 }
6576
6577 case IEMMODE_64BIT:
6578 {
6579 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6580
6581 if (!IEM_IS_CANONICAL(uNewRip))
6582 return iemRaiseGeneralProtectionFault0(pVCpu);
6583 pVCpu->cpum.GstCtx.rip = uNewRip;
6584 break;
6585 }
6586
6587 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6588 }
6589
6590 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6591
6592#ifndef IEM_WITH_CODE_TLB
6593 /* Flush the prefetch buffer. */
6594 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6595#endif
6596
6597 return VINF_SUCCESS;
6598}
6599
6600
6601/**
6602 * Gets the address of the top of the stack.
6603 *
6604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6605 */
6606DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6607{
6608 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6609 return pVCpu->cpum.GstCtx.rsp;
6610 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6611 return pVCpu->cpum.GstCtx.esp;
6612 return pVCpu->cpum.GstCtx.sp;
6613}
6614
6615
6616/**
6617 * Updates the RIP/EIP/IP to point to the next instruction.
6618 *
6619 * This function leaves the EFLAGS.RF flag alone.
6620 *
6621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6622 * @param cbInstr The number of bytes to add.
6623 */
6624IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6625{
6626 switch (pVCpu->iem.s.enmCpuMode)
6627 {
6628 case IEMMODE_16BIT:
6629 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6630 pVCpu->cpum.GstCtx.eip += cbInstr;
6631 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6632 break;
6633
6634 case IEMMODE_32BIT:
6635 pVCpu->cpum.GstCtx.eip += cbInstr;
6636 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6637 break;
6638
6639 case IEMMODE_64BIT:
6640 pVCpu->cpum.GstCtx.rip += cbInstr;
6641 break;
6642 default: AssertFailed();
6643 }
6644}
6645
6646
6647#if 0
6648/**
6649 * Updates the RIP/EIP/IP to point to the next instruction.
6650 *
6651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6652 */
6653IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6654{
6655 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6656}
6657#endif
6658
6659
6660
6661/**
6662 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6663 *
6664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6665 * @param cbInstr The number of bytes to add.
6666 */
6667IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6668{
6669 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6670
6671 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6672#if ARCH_BITS >= 64
6673 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6674 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6675 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6676#else
6677 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6678 pVCpu->cpum.GstCtx.rip += cbInstr;
6679 else
6680 pVCpu->cpum.GstCtx.eip += cbInstr;
6681#endif
6682}
6683
6684
6685/**
6686 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6687 *
6688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6689 */
6690IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6691{
6692 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6693}
6694
6695
6696/**
6697 * Adds to the stack pointer.
6698 *
6699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6700 * @param cbToAdd The number of bytes to add (8-bit!).
6701 */
6702DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6703{
6704 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6705 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6706 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6707 pVCpu->cpum.GstCtx.esp += cbToAdd;
6708 else
6709 pVCpu->cpum.GstCtx.sp += cbToAdd;
6710}
6711
6712
6713/**
6714 * Subtracts from the stack pointer.
6715 *
6716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6717 * @param cbToSub The number of bytes to subtract (8-bit!).
6718 */
6719DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6720{
6721 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6722 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6723 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6724 pVCpu->cpum.GstCtx.esp -= cbToSub;
6725 else
6726 pVCpu->cpum.GstCtx.sp -= cbToSub;
6727}
6728
6729
6730/**
6731 * Adds to the temporary stack pointer.
6732 *
6733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6734 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6735 * @param cbToAdd The number of bytes to add (16-bit).
6736 */
6737DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6738{
6739 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6740 pTmpRsp->u += cbToAdd;
6741 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6742 pTmpRsp->DWords.dw0 += cbToAdd;
6743 else
6744 pTmpRsp->Words.w0 += cbToAdd;
6745}
6746
6747
6748/**
6749 * Subtracts from the temporary stack pointer.
6750 *
6751 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6752 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6753 * @param cbToSub The number of bytes to subtract.
6754 * @remarks The @a cbToSub argument *MUST* be 16-bit, as iemCImpl_enter
6755 * expects that.
6756 */
6757DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6758{
6759 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6760 pTmpRsp->u -= cbToSub;
6761 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6762 pTmpRsp->DWords.dw0 -= cbToSub;
6763 else
6764 pTmpRsp->Words.w0 -= cbToSub;
6765}
6766
6767
6768/**
6769 * Calculates the effective stack address for a push of the specified size as
6770 * well as the new RSP value (upper bits may be masked).
6771 *
6772 * @returns Effective stack address for the push.
6773 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6774 * @param cbItem The size of the stack item to push.
6775 * @param puNewRsp Where to return the new RSP value.
6776 */
6777DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6778{
6779 RTUINT64U uTmpRsp;
6780 RTGCPTR GCPtrTop;
6781 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6782
6783 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6784 GCPtrTop = uTmpRsp.u -= cbItem;
6785 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6786 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6787 else
6788 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6789 *puNewRsp = uTmpRsp.u;
6790 return GCPtrTop;
6791}
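/* Worked example (sketch): with a 32-bit stack segment (SS.Attr.D/B=1) and
   RSP=0x1000, a 4-byte push returns GCPtrTop=0x0FFC and *puNewRsp with only
   the low dword updated to 0x0FFC; the upper half of the temporary RSP is
   carried over unchanged.  With a 16-bit stack only SP (the low word) is
   decremented. */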
6792
6793
6794/**
6795 * Gets the current stack pointer and calculates the value after a pop of the
6796 * specified size.
6797 *
6798 * @returns Current stack pointer.
6799 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6800 * @param cbItem The size of the stack item to pop.
6801 * @param puNewRsp Where to return the new RSP value.
6802 */
6803DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6804{
6805 RTUINT64U uTmpRsp;
6806 RTGCPTR GCPtrTop;
6807 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6808
6809 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6810 {
6811 GCPtrTop = uTmpRsp.u;
6812 uTmpRsp.u += cbItem;
6813 }
6814 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6815 {
6816 GCPtrTop = uTmpRsp.DWords.dw0;
6817 uTmpRsp.DWords.dw0 += cbItem;
6818 }
6819 else
6820 {
6821 GCPtrTop = uTmpRsp.Words.w0;
6822 uTmpRsp.Words.w0 += cbItem;
6823 }
6824 *puNewRsp = uTmpRsp.u;
6825 return GCPtrTop;
6826}
6827
6828
6829/**
6830 * Calculates the effective stack address for a push of the specified size as
6831 * well as the new temporary RSP value (upper bits may be masked).
6832 *
6833 * @returns Effective stack address for the push.
6834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6835 * @param pTmpRsp The temporary stack pointer. This is updated.
6836 * @param cbItem The size of the stack item to push.
6837 */
6838DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6839{
6840 RTGCPTR GCPtrTop;
6841
6842 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6843 GCPtrTop = pTmpRsp->u -= cbItem;
6844 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6845 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6846 else
6847 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6848 return GCPtrTop;
6849}
6850
6851
6852/**
6853 * Gets the effective stack address for a pop of the specified size and
6854 * calculates and updates the temporary RSP.
6855 *
6856 * @returns Current stack pointer.
6857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6858 * @param pTmpRsp The temporary stack pointer. This is updated.
6859 * @param cbItem The size of the stack item to pop.
6860 */
6861DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6862{
6863 RTGCPTR GCPtrTop;
6864 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6865 {
6866 GCPtrTop = pTmpRsp->u;
6867 pTmpRsp->u += cbItem;
6868 }
6869 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6870 {
6871 GCPtrTop = pTmpRsp->DWords.dw0;
6872 pTmpRsp->DWords.dw0 += cbItem;
6873 }
6874 else
6875 {
6876 GCPtrTop = pTmpRsp->Words.w0;
6877 pTmpRsp->Words.w0 += cbItem;
6878 }
6879 return GCPtrTop;
6880}
6881
6882/** @} */
6883
6884
6885/** @name FPU access and helpers.
6886 *
6887 * @{
6888 */
6889
6890
6891/**
6892 * Hook for preparing to use the host FPU.
6893 *
6894 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6895 *
6896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6897 */
6898DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6899{
6900#ifdef IN_RING3
6901 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6902#else
6903 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6904#endif
6905 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6906}
6907
6908
6909/**
6910 * Hook for preparing to use the host FPU for SSE.
6911 *
6912 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6913 *
6914 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6915 */
6916DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6917{
6918 iemFpuPrepareUsage(pVCpu);
6919}
6920
6921
6922/**
6923 * Hook for preparing to use the host FPU for AVX.
6924 *
6925 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6926 *
6927 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6928 */
6929DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6930{
6931 iemFpuPrepareUsage(pVCpu);
6932}
6933
6934
6935/**
6936 * Hook for actualizing the guest FPU state before the interpreter reads it.
6937 *
6938 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6939 *
6940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6941 */
6942DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6943{
6944#ifdef IN_RING3
6945 NOREF(pVCpu);
6946#else
6947 CPUMRZFpuStateActualizeForRead(pVCpu);
6948#endif
6949 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6950}
6951
6952
6953/**
6954 * Hook for actualizing the guest FPU state before the interpreter changes it.
6955 *
6956 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6957 *
6958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6959 */
6960DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6961{
6962#ifdef IN_RING3
6963 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6964#else
6965 CPUMRZFpuStateActualizeForChange(pVCpu);
6966#endif
6967 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6968}
6969
6970
6971/**
6972 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6973 * only.
6974 *
6975 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6976 *
6977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6978 */
6979DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6980{
6981#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6982 NOREF(pVCpu);
6983#else
6984 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6985#endif
6986 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6987}
6988
6989
6990/**
6991 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6992 * read+write.
6993 *
6994 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6995 *
6996 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6997 */
6998DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6999{
7000#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7001 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7002#else
7003 CPUMRZFpuStateActualizeForChange(pVCpu);
7004#endif
7005 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7006}
7007
7008
7009/**
7010 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7011 * only.
7012 *
7013 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7014 *
7015 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7016 */
7017DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7018{
7019#ifdef IN_RING3
7020 NOREF(pVCpu);
7021#else
7022 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7023#endif
7024 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7025}
7026
7027
7028/**
7029 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7030 * read+write.
7031 *
7032 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7033 *
7034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7035 */
7036DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7037{
7038#ifdef IN_RING3
7039 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7040#else
7041 CPUMRZFpuStateActualizeForChange(pVCpu);
7042#endif
7043 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7044}
7045
7046
7047/**
7048 * Stores a QNaN value into a FPU register.
7049 *
7050 * @param pReg Pointer to the register.
7051 */
7052DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7053{
7054 pReg->au32[0] = UINT32_C(0x00000000);
7055 pReg->au32[1] = UINT32_C(0xc0000000);
7056 pReg->au16[4] = UINT16_C(0xffff);
7057}
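/* The value stored above is the x87 "real indefinite" QNaN: sign=1,
   exponent=0x7FFF, mantissa=0xC000000000000000, i.e. the 80-bit pattern
   0xFFFF'C0000000'00000000. */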
7058
7059
7060/**
7061 * Updates the FOP, FPU.CS and FPUIP registers.
7062 *
7063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7064 * @param pFpuCtx The FPU context.
7065 */
7066DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7067{
7068 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7069 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7070 /** @todo x87.CS and FPUIP need to be kept separately. */
7071 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7072 {
7073 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7074 * happens in real mode here based on the fnsave and fnstenv images. */
7075 pFpuCtx->CS = 0;
7076 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7077 }
7078 else
7079 {
7080 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7081 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7082 }
7083}
7084
7085
7086/**
7087 * Updates the x87.DS and FPUDP registers.
7088 *
7089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7090 * @param pFpuCtx The FPU context.
7091 * @param iEffSeg The effective segment register.
7092 * @param GCPtrEff The effective address relative to @a iEffSeg.
7093 */
7094DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7095{
7096 RTSEL sel;
7097 switch (iEffSeg)
7098 {
7099 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7100 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7101 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7102 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7103 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7104 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7105 default:
7106 AssertMsgFailed(("%d\n", iEffSeg));
7107 sel = pVCpu->cpum.GstCtx.ds.Sel;
7108 }
7109 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7110 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7111 {
7112 pFpuCtx->DS = 0;
7113 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7114 }
7115 else
7116 {
7117 pFpuCtx->DS = sel;
7118 pFpuCtx->FPUDP = GCPtrEff;
7119 }
7120}
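/* Worked example (sketch): in real or V86 mode the data pointer is stored as
   a linear address, so DS=0x1234 with an effective offset of 0x0010 yields
   FPUDP = 0x0010 + (0x1234 << 4) = 0x00012350 and DS is recorded as 0; in
   protected mode the selector and the offset are kept as-is. */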
7121
7122
7123/**
7124 * Rotates the stack registers in the push direction.
7125 *
7126 * @param pFpuCtx The FPU context.
7127 * @remarks This is a complete waste of time, but fxsave stores the registers in
7128 * stack order.
7129 */
7130DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7131{
7132 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7133 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7134 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7135 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7136 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7137 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7138 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7139 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7140 pFpuCtx->aRegs[0].r80 = r80Tmp;
7141}
7142
7143
7144/**
7145 * Rotates the stack registers in the pop direction.
7146 *
7147 * @param pFpuCtx The FPU context.
7148 * @remarks This is a complete waste of time, but fxsave stores the registers in
7149 * stack order.
7150 */
7151DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7152{
7153 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7154 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7155 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7156 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7157 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7158 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7159 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7160 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7161 pFpuCtx->aRegs[7].r80 = r80Tmp;
7162}
7163
7164
7165/**
7166 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7167 * exception prevents it.
7168 *
7169 * @param pResult The FPU operation result to push.
7170 * @param pFpuCtx The FPU context.
7171 */
7172IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7173{
7174 /* Update FSW and bail if there are pending exceptions afterwards. */
7175 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7176 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7177 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7178 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7179 {
7180 pFpuCtx->FSW = fFsw;
7181 return;
7182 }
7183
7184 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7185 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7186 {
7187 /* All is fine, push the actual value. */
7188 pFpuCtx->FTW |= RT_BIT(iNewTop);
7189 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7190 }
7191 else if (pFpuCtx->FCW & X86_FCW_IM)
7192 {
7193 /* Masked stack overflow, push QNaN. */
7194 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7195 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7196 }
7197 else
7198 {
7199 /* Raise stack overflow, don't push anything. */
7200 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7201 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7202 return;
7203 }
7204
7205 fFsw &= ~X86_FSW_TOP_MASK;
7206 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7207 pFpuCtx->FSW = fFsw;
7208
7209 iemFpuRotateStackPush(pFpuCtx);
7210}
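/* Push sequence in a nutshell (sketch): the value is written to aRegs[7], TOP
   is decremented modulo 8 (the "+ 7" above), FTW gets the bit of the new top
   register, and the final rotation moves the value from aRegs[7] into
   aRegs[0], so aRegs[i] keeps meaning ST(i) after the push. */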
7211
7212
7213/**
7214 * Stores a result in a FPU register and updates the FSW and FTW.
7215 *
7216 * @param pFpuCtx The FPU context.
7217 * @param pResult The result to store.
7218 * @param iStReg Which FPU register to store it in.
7219 */
7220IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7221{
7222 Assert(iStReg < 8);
7223 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7224 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7225 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7226 pFpuCtx->FTW |= RT_BIT(iReg);
7227 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7228}
7229
7230
7231/**
7232 * Only updates the FPU status word (FSW) with the result of the current
7233 * instruction.
7234 *
7235 * @param pFpuCtx The FPU context.
7236 * @param u16FSW The FSW output of the current instruction.
7237 */
7238IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7239{
7240 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7241 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7242}
7243
7244
7245/**
7246 * Pops one item off the FPU stack if no pending exception prevents it.
7247 *
7248 * @param pFpuCtx The FPU context.
7249 */
7250IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7251{
7252 /* Check pending exceptions. */
7253 uint16_t uFSW = pFpuCtx->FSW;
7254 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7255 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7256 return;
7257
7258 /* TOP--. */
7259 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7260 uFSW &= ~X86_FSW_TOP_MASK;
7261 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7262 pFpuCtx->FSW = uFSW;
7263
7264 /* Mark the previous ST0 as empty. */
7265 iOldTop >>= X86_FSW_TOP_SHIFT;
7266 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7267
7268 /* Rotate the registers. */
7269 iemFpuRotateStackPop(pFpuCtx);
7270}
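/* Note on the TOP update above (sketch): TOP is a 3-bit field, so adding
   9 << X86_FSW_TOP_SHIFT and masking is equivalent to (TOP + 1) & 7, i.e. a
   pop; e.g. TOP=7 becomes (7 + 9) & 7 = 0. */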
7271
7272
7273/**
7274 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7275 *
7276 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7277 * @param pResult The FPU operation result to push.
7278 */
7279IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7280{
7281 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7282 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7283 iemFpuMaybePushResult(pResult, pFpuCtx);
7284}
7285
7286
7287/**
7288 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7289 * and sets FPUDP and FPUDS.
7290 *
7291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7292 * @param pResult The FPU operation result to push.
7293 * @param iEffSeg The effective segment register.
7294 * @param GCPtrEff The effective address relative to @a iEffSeg.
7295 */
7296IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7297{
7298 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7299 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7300 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7301 iemFpuMaybePushResult(pResult, pFpuCtx);
7302}
7303
7304
7305/**
7306 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7307 * unless a pending exception prevents it.
7308 *
7309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7310 * @param pResult The FPU operation result to store and push.
7311 */
7312IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7313{
7314 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7315 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7316
7317 /* Update FSW and bail if there are pending exceptions afterwards. */
7318 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7319 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7320 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7321 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7322 {
7323 pFpuCtx->FSW = fFsw;
7324 return;
7325 }
7326
7327 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7328 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7329 {
7330 /* All is fine, push the actual value. */
7331 pFpuCtx->FTW |= RT_BIT(iNewTop);
7332 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7333 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7334 }
7335 else if (pFpuCtx->FCW & X86_FCW_IM)
7336 {
7337 /* Masked stack overflow, push QNaN. */
7338 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7339 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7340 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7341 }
7342 else
7343 {
7344 /* Raise stack overflow, don't push anything. */
7345 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7346 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7347 return;
7348 }
7349
7350 fFsw &= ~X86_FSW_TOP_MASK;
7351 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7352 pFpuCtx->FSW = fFsw;
7353
7354 iemFpuRotateStackPush(pFpuCtx);
7355}
7356
7357
7358/**
7359 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7360 * FOP.
7361 *
7362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7363 * @param pResult The result to store.
7364 * @param iStReg Which FPU register to store it in.
7365 */
7366IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7367{
7368 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7369 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7370 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7371}
7372
7373
7374/**
7375 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7376 * FOP, and then pops the stack.
7377 *
7378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7379 * @param pResult The result to store.
7380 * @param iStReg Which FPU register to store it in.
7381 */
7382IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7383{
7384 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7385 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7386 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7387 iemFpuMaybePopOne(pFpuCtx);
7388}
7389
7390
7391/**
7392 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7393 * FPUDP, and FPUDS.
7394 *
7395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7396 * @param pResult The result to store.
7397 * @param iStReg Which FPU register to store it in.
7398 * @param iEffSeg The effective memory operand selector register.
7399 * @param GCPtrEff The effective memory operand offset.
7400 */
7401IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7402 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7403{
7404 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7405 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7406 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7407 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7408}
7409
7410
7411/**
7412 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7413 * FPUDP, and FPUDS, and then pops the stack.
7414 *
7415 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7416 * @param pResult The result to store.
7417 * @param iStReg Which FPU register to store it in.
7418 * @param iEffSeg The effective memory operand selector register.
7419 * @param GCPtrEff The effective memory operand offset.
7420 */
7421IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7422 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7423{
7424 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7425 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7426 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7427 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7428 iemFpuMaybePopOne(pFpuCtx);
7429}
7430
7431
7432/**
7433 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7434 *
7435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7436 */
7437IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7438{
7439 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7440 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7441}
7442
7443
7444/**
7445 * Marks the specified stack register as free (for FFREE).
7446 *
7447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7448 * @param iStReg The register to free.
7449 */
7450IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7451{
7452 Assert(iStReg < 8);
7453 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7454 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7455 pFpuCtx->FTW &= ~RT_BIT(iReg);
7456}
7457
7458
7459/**
7460 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7461 *
7462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7463 */
7464IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7465{
7466 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7467 uint16_t uFsw = pFpuCtx->FSW;
7468 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7469 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7470 uFsw &= ~X86_FSW_TOP_MASK;
7471 uFsw |= uTop;
7472 pFpuCtx->FSW = uFsw;
7473}
7474
7475
7476/**
7477 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7478 *
7479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7480 */
7481IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7482{
7483 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7484 uint16_t uFsw = pFpuCtx->FSW;
7485 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7486 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7487 uFsw &= ~X86_FSW_TOP_MASK;
7488 uFsw |= uTop;
7489 pFpuCtx->FSW = uFsw;
7490}
7491
7492
7493/**
7494 * Updates the FSW, FOP, FPUIP, and FPUCS.
7495 *
7496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7497 * @param u16FSW The FSW from the current instruction.
7498 */
7499IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7500{
7501 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7502 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7503 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7504}
7505
7506
7507/**
7508 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7509 *
7510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7511 * @param u16FSW The FSW from the current instruction.
7512 */
7513IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7514{
7515 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7516 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7517 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7518 iemFpuMaybePopOne(pFpuCtx);
7519}
7520
7521
7522/**
7523 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7524 *
7525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7526 * @param u16FSW The FSW from the current instruction.
7527 * @param iEffSeg The effective memory operand selector register.
7528 * @param GCPtrEff The effective memory operand offset.
7529 */
7530IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7531{
7532 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7533 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7534 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7535 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7536}
7537
7538
7539/**
7540 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7541 *
7542 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7543 * @param u16FSW The FSW from the current instruction.
7544 */
7545IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7546{
7547 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7548 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7549 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7550 iemFpuMaybePopOne(pFpuCtx);
7551 iemFpuMaybePopOne(pFpuCtx);
7552}
7553
7554
7555/**
7556 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7557 *
7558 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7559 * @param u16FSW The FSW from the current instruction.
7560 * @param iEffSeg The effective memory operand selector register.
7561 * @param GCPtrEff The effective memory operand offset.
7562 */
7563IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7564{
7565 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7566 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7567 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7568 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7569 iemFpuMaybePopOne(pFpuCtx);
7570}
7571
7572
7573/**
7574 * Worker routine for raising an FPU stack underflow exception.
7575 *
7576 * @param pFpuCtx The FPU context.
7577 * @param iStReg The stack register being accessed.
7578 */
7579IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7580{
7581 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7582 if (pFpuCtx->FCW & X86_FCW_IM)
7583 {
7584 /* Masked underflow. */
7585 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7586 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7587 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7588 if (iStReg != UINT8_MAX)
7589 {
7590 pFpuCtx->FTW |= RT_BIT(iReg);
7591 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7592 }
7593 }
7594 else
7595 {
7596 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7597 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7598 }
7599}
7600
7601
7602/**
7603 * Raises a FPU stack underflow exception.
7604 *
7605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7606 * @param iStReg The destination register that should be loaded
7607 * with QNaN if \#IS is not masked. Specify
7608 * UINT8_MAX if none (like for fcom).
7609 */
7610DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7611{
7612 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7613 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7614 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7615}
7616
7617
7618DECL_NO_INLINE(IEM_STATIC, void)
7619iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7620{
7621 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7622 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7623 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7624 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7625}
7626
7627
7628DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7629{
7630 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7631 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7632 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7633 iemFpuMaybePopOne(pFpuCtx);
7634}
7635
7636
7637DECL_NO_INLINE(IEM_STATIC, void)
7638iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7639{
7640 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7641 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7642 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7643 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7644 iemFpuMaybePopOne(pFpuCtx);
7645}
7646
7647
7648DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7649{
7650 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7651 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7652 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7653 iemFpuMaybePopOne(pFpuCtx);
7654 iemFpuMaybePopOne(pFpuCtx);
7655}
7656
7657
7658DECL_NO_INLINE(IEM_STATIC, void)
7659iemFpuStackPushUnderflow(PVMCPU pVCpu)
7660{
7661 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7662 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7663
7664 if (pFpuCtx->FCW & X86_FCW_IM)
7665 {
7666 /* Masked underflow - Push QNaN. */
7667 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7668 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7669 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7670 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7671 pFpuCtx->FTW |= RT_BIT(iNewTop);
7672 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7673 iemFpuRotateStackPush(pFpuCtx);
7674 }
7675 else
7676 {
7677 /* Exception pending - don't change TOP or the register stack. */
7678 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7679 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7680 }
7681}
7682
7683
7684DECL_NO_INLINE(IEM_STATIC, void)
7685iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7686{
7687 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7688 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7689
7690 if (pFpuCtx->FCW & X86_FCW_IM)
7691 {
7692 /* Masked underflow - Push QNaN. */
7693 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7694 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7695 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7696 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7697 pFpuCtx->FTW |= RT_BIT(iNewTop);
7698 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7699 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7700 iemFpuRotateStackPush(pFpuCtx);
7701 }
7702 else
7703 {
7704 /* Exception pending - don't change TOP or the register stack. */
7705 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7706 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7707 }
7708}
7709
7710
7711/**
7712 * Worker routine for raising an FPU stack overflow exception on a push.
7713 *
7714 * @param pFpuCtx The FPU context.
7715 */
7716IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7717{
7718 if (pFpuCtx->FCW & X86_FCW_IM)
7719 {
7720 /* Masked overflow. */
7721 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7722 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7723 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7724 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7725 pFpuCtx->FTW |= RT_BIT(iNewTop);
7726 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7727 iemFpuRotateStackPush(pFpuCtx);
7728 }
7729 else
7730 {
7731 /* Exception pending - don't change TOP or the register stack. */
7732 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7733 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7734 }
7735}
7736
7737
7738/**
7739 * Raises a FPU stack overflow exception on a push.
7740 *
7741 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7742 */
7743DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7744{
7745 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7746 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7747 iemFpuStackPushOverflowOnly(pFpuCtx);
7748}
7749
7750
7751/**
7752 * Raises a FPU stack overflow exception on a push with a memory operand.
7753 *
7754 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7755 * @param iEffSeg The effective memory operand selector register.
7756 * @param GCPtrEff The effective memory operand offset.
7757 */
7758DECL_NO_INLINE(IEM_STATIC, void)
7759iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7760{
7761 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7762 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7763 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7764 iemFpuStackPushOverflowOnly(pFpuCtx);
7765}
7766
7767
7768IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7769{
7770 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7771 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7772 if (pFpuCtx->FTW & RT_BIT(iReg))
7773 return VINF_SUCCESS;
7774 return VERR_NOT_FOUND;
7775}
7776
7777
7778IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7779{
7780 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7781 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7782 if (pFpuCtx->FTW & RT_BIT(iReg))
7783 {
7784 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7785 return VINF_SUCCESS;
7786 }
7787 return VERR_NOT_FOUND;
7788}
7789
7790
7791IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7792 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7793{
7794 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7795 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7796 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7797 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7798 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7799 {
7800 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7801 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7802 return VINF_SUCCESS;
7803 }
7804 return VERR_NOT_FOUND;
7805}
7806
7807
7808IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7809{
7810 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7811 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7812 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7813 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7814 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7815 {
7816 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7817 return VINF_SUCCESS;
7818 }
7819 return VERR_NOT_FOUND;
7820}
7821
7822
7823/**
7824 * Updates the FPU exception status after FCW is changed.
7825 *
7826 * @param pFpuCtx The FPU context.
7827 */
7828IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7829{
7830 uint16_t u16Fsw = pFpuCtx->FSW;
7831 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7832 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7833 else
7834 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7835 pFpuCtx->FSW = u16Fsw;
7836}
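/*
 * Worked example (added for clarity, not in the original source): with the
 * power-on default FCW of 0x037f all exceptions are masked, so a pending
 * FSW.ZE (bit 2) never sets ES.  If the guest clears FCW.ZM while FSW.ZE is
 * already latched, the expression above leaves the ZE bit standing after
 * masking, so ES and B get set (e.g. FSW 0x0004 becomes 0x8084).
 */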
7837
7838
7839/**
7840 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7841 *
7842 * @returns The full FTW.
7843 * @param pFpuCtx The FPU context.
7844 */
7845IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7846{
7847 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7848 uint16_t u16Ftw = 0;
7849 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7850 for (unsigned iSt = 0; iSt < 8; iSt++)
7851 {
7852 unsigned const iReg = (iSt + iTop) & 7;
7853 if (!(u8Ftw & RT_BIT(iReg)))
7854 u16Ftw |= 3 << (iReg * 2); /* empty */
7855 else
7856 {
7857 uint16_t uTag;
7858 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7859 if (pr80Reg->s.uExponent == 0x7fff)
7860 uTag = 2; /* Exponent is all 1's => Special. */
7861 else if (pr80Reg->s.uExponent == 0x0000)
7862 {
7863 if (pr80Reg->s.u64Mantissa == 0x0000)
7864 uTag = 1; /* All bits are zero => Zero. */
7865 else
7866 uTag = 2; /* Must be special. */
7867 }
7868 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7869 uTag = 0; /* Valid. */
7870 else
7871 uTag = 2; /* Must be special. */
7872
7873 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7874 }
7875 }
7876
7877 return u16Ftw;
7878}
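/*
 * Worked example (added for clarity, not in the original source): with TOP=7
 * and an abridged FTW of 0x80 (only physical register 7 occupied), the loop
 * above inspects aRegs[0] (= ST(0), physical register 7).  If it holds 1.0
 * (exponent 0x3fff, J bit set), the tag is 0 (valid), and the full FTW comes
 * out as 0x3fff: registers 0..6 tagged 11b (empty), register 7 tagged 00b.
 */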
7879
7880
7881/**
7882 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7883 *
7884 * @returns The compressed FTW.
7885 * @param u16FullFtw The full FTW to convert.
7886 */
7887IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7888{
7889 uint8_t u8Ftw = 0;
7890 for (unsigned i = 0; i < 8; i++)
7891 {
7892 if ((u16FullFtw & 3) != 3 /*empty*/)
7893 u8Ftw |= RT_BIT(i);
7894 u16FullFtw >>= 2;
7895 }
7896
7897 return u8Ftw;
7898}
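/*
 * Worked example (added for clarity, not in the original source): a full FTW
 * of 0xfffc (register 0 tagged 00b/valid, registers 1..7 tagged 11b/empty)
 * compresses to 0x01, and 0xffff (all empty) compresses to 0x00; only the
 * "occupied or not" information survives the conversion.
 */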
7899
7900/** @} */
7901
7902
7903/** @name Memory access.
7904 *
7905 * @{
7906 */
7907
7908
7909/**
7910 * Updates the IEMCPU::cbWritten counter if applicable.
7911 *
7912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7913 * @param fAccess The access being accounted for.
7914 * @param cbMem The access size.
7915 */
7916DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7917{
7918 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7919 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7920 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7921}
7922
7923
7924/**
7925 * Checks if the given segment can be written to, raising the appropriate
7926 * exception if not.
7927 *
7928 * @returns VBox strict status code.
7929 *
7930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7931 * @param pHid Pointer to the hidden register.
7932 * @param iSegReg The register number.
7933 * @param pu64BaseAddr Where to return the base address to use for the
7934 * segment. (In 64-bit code it may differ from the
7935 * base in the hidden segment.)
7936 */
7937IEM_STATIC VBOXSTRICTRC
7938iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7939{
7940 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7941
7942 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7943 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7944 else
7945 {
7946 if (!pHid->Attr.n.u1Present)
7947 {
7948 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7949 AssertRelease(uSel == 0);
7950 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7951 return iemRaiseGeneralProtectionFault0(pVCpu);
7952 }
7953
7954 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7955 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7956 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7957 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7958 *pu64BaseAddr = pHid->u64Base;
7959 }
7960 return VINF_SUCCESS;
7961}
7962
7963
7964/**
7965 * Checks if the given segment can be read from, raising the appropriate
7966 * exception if not.
7967 *
7968 * @returns VBox strict status code.
7969 *
7970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7971 * @param pHid Pointer to the hidden register.
7972 * @param iSegReg The register number.
7973 * @param pu64BaseAddr Where to return the base address to use for the
7974 * segment. (In 64-bit code it may differ from the
7975 * base in the hidden segment.)
7976 */
7977IEM_STATIC VBOXSTRICTRC
7978iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7979{
7980 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7981
7982 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7983 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7984 else
7985 {
7986 if (!pHid->Attr.n.u1Present)
7987 {
7988 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7989 AssertRelease(uSel == 0);
7990 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7991 return iemRaiseGeneralProtectionFault0(pVCpu);
7992 }
7993
7994 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7995 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7996 *pu64BaseAddr = pHid->u64Base;
7997 }
7998 return VINF_SUCCESS;
7999}
8000
8001
8002/**
8003 * Applies the segment limit, base and attributes.
8004 *
8005 * This may raise a \#GP or \#SS.
8006 *
8007 * @returns VBox strict status code.
8008 *
8009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8010 * @param fAccess The kind of access which is being performed.
8011 * @param iSegReg The index of the segment register to apply.
8012 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8013 * TSS, ++).
8014 * @param cbMem The access size.
8015 * @param pGCPtrMem Pointer to the guest memory address to apply
8016 * segmentation to. Input and output parameter.
8017 */
8018IEM_STATIC VBOXSTRICTRC
8019iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8020{
8021 if (iSegReg == UINT8_MAX)
8022 return VINF_SUCCESS;
8023
8024 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8025 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8026 switch (pVCpu->iem.s.enmCpuMode)
8027 {
8028 case IEMMODE_16BIT:
8029 case IEMMODE_32BIT:
8030 {
8031 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8032 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8033
8034 if ( pSel->Attr.n.u1Present
8035 && !pSel->Attr.n.u1Unusable)
8036 {
8037 Assert(pSel->Attr.n.u1DescType);
8038 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8039 {
8040 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8041 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8042 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8043
8044 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8045 {
8046 /** @todo CPL check. */
8047 }
8048
8049 /*
8050 * There are two kinds of data selectors, normal and expand down.
8051 */
8052 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8053 {
8054 if ( GCPtrFirst32 > pSel->u32Limit
8055 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8056 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8057 }
8058 else
8059 {
8060 /*
8061 * The upper boundary is defined by the B bit, not the G bit!
8062 */
8063 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8064 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8065 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8066 }
8067 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8068 }
8069 else
8070 {
8071
8072 /*
8073 * A code selector can usually be used to read through; writing is
8074 * only permitted in real and V8086 mode.
8075 */
8076 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8077 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8078 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8079 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8080 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8081
8082 if ( GCPtrFirst32 > pSel->u32Limit
8083 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8084 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8085
8086 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8087 {
8088 /** @todo CPL check. */
8089 }
8090
8091 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8092 }
8093 }
8094 else
8095 return iemRaiseGeneralProtectionFault0(pVCpu);
8096 return VINF_SUCCESS;
8097 }
8098
8099 case IEMMODE_64BIT:
8100 {
8101 RTGCPTR GCPtrMem = *pGCPtrMem;
8102 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8103 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8104
8105 Assert(cbMem >= 1);
8106 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8107 return VINF_SUCCESS;
8108 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8109 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8110 return iemRaiseGeneralProtectionFault0(pVCpu);
8111 }
8112
8113 default:
8114 AssertFailedReturn(VERR_IEM_IPE_7);
8115 }
8116}
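/*
 * Worked example (added for clarity, not in the original source): take a
 * 32-bit expand-up data segment with base 0x10000 and limit 0xffff.  A
 * 4-byte access at offset 0xfffc passes the check above (first=0xfffc,
 * last=0xffff, both within the limit) and yields the linear address 0x1fffc,
 * while a 4-byte access at offset 0xfffe fails because last=0x10001 exceeds
 * the limit, raising a selector-bounds fault (#GP, or #SS for SS).
 */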
8117
8118
8119/**
8120 * Translates a virtual address to a physical address and checks if we
8121 * can access the page as specified.
8122 *
8123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8124 * @param GCPtrMem The virtual address.
8125 * @param fAccess The intended access.
8126 * @param pGCPhysMem Where to return the physical address.
8127 */
8128IEM_STATIC VBOXSTRICTRC
8129iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8130{
8131 /** @todo Need a different PGM interface here. We're currently using
8132 * generic / REM interfaces. This won't cut it for R0 & RC. */
8133 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8134 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8135 RTGCPHYS GCPhys;
8136 uint64_t fFlags;
8137 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8138 if (RT_FAILURE(rc))
8139 {
8140 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8141 /** @todo Check unassigned memory in unpaged mode. */
8142 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8143 *pGCPhysMem = NIL_RTGCPHYS;
8144 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8145 }
8146
8147 /* If the page is writable and does not have the no-exec bit set, all
8148 access is allowed. Otherwise we'll have to check more carefully... */
8149 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8150 {
8151 /* Write to read only memory? */
8152 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8153 && !(fFlags & X86_PTE_RW)
8154 && ( (pVCpu->iem.s.uCpl == 3
8155 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8156 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8157 {
8158 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8159 *pGCPhysMem = NIL_RTGCPHYS;
8160 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8161 }
8162
8163 /* Kernel memory accessed by userland? */
8164 if ( !(fFlags & X86_PTE_US)
8165 && pVCpu->iem.s.uCpl == 3
8166 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8167 {
8168 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8169 *pGCPhysMem = NIL_RTGCPHYS;
8170 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8171 }
8172
8173 /* Executing non-executable memory? */
8174 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8175 && (fFlags & X86_PTE_PAE_NX)
8176 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8177 {
8178 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8179 *pGCPhysMem = NIL_RTGCPHYS;
8180 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8181 VERR_ACCESS_DENIED);
8182 }
8183 }
8184
8185 /*
8186 * Set the dirty / access flags.
8187 * ASSUMES this is set when the address is translated rather than on commit...
8188 */
8189 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8190 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8191 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8192 {
8193 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8194 AssertRC(rc2);
8195 }
8196
8197 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8198 *pGCPhysMem = GCPhys;
8199 return VINF_SUCCESS;
8200}
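/*
 * Example of the checks above (added for clarity, not in the original
 * source): a CPL 3 data write to a page with R/W=0 always raises #PF,
 * whereas a CPL 0 (supervisor) write to the same page only faults when
 * CR0.WP is set; a user-mode access to a page with U/S=0 faults regardless,
 * and an instruction fetch from a page with the NX bit set faults only when
 * EFER.NXE is enabled.
 */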
8201
8202
8203
8204/**
8205 * Maps a physical page.
8206 *
8207 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8209 * @param GCPhysMem The physical address.
8210 * @param fAccess The intended access.
8211 * @param ppvMem Where to return the mapping address.
8212 * @param pLock The PGM lock.
8213 */
8214IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8215{
8216#ifdef IEM_LOG_MEMORY_WRITES
8217 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8218 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8219#endif
8220
8221 /** @todo This API may require some improving later. A private deal with PGM
8222 * regarding locking and unlocking needs to be struck. A couple of TLBs
8223 * living in PGM, but with publicly accessible inlined access methods
8224 * could perhaps be an even better solution. */
8225 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8226 GCPhysMem,
8227 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8228 pVCpu->iem.s.fBypassHandlers,
8229 ppvMem,
8230 pLock);
8231 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8232 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8233
8234 return rc;
8235}
8236
8237
8238/**
8239 * Unmap a page previously mapped by iemMemPageMap.
8240 *
8241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8242 * @param GCPhysMem The physical address.
8243 * @param fAccess The intended access.
8244 * @param pvMem What iemMemPageMap returned.
8245 * @param pLock The PGM lock.
8246 */
8247DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8248{
8249 NOREF(pVCpu);
8250 NOREF(GCPhysMem);
8251 NOREF(fAccess);
8252 NOREF(pvMem);
8253 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8254}
8255
8256
8257/**
8258 * Looks up a memory mapping entry.
8259 *
8260 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8262 * @param pvMem The memory address.
8263 * @param fAccess The access flags to match (what + type).
8264 */
8265DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8266{
8267 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8268 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8269 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8270 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8271 return 0;
8272 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8273 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8274 return 1;
8275 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8276 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8277 return 2;
8278 return VERR_NOT_FOUND;
8279}
8280
8281
8282/**
8283 * Finds a free memmap entry when using iNextMapping doesn't work.
8284 *
8285 * @returns Memory mapping index, 1024 on failure.
8286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8287 */
8288IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8289{
8290 /*
8291 * The easy case.
8292 */
8293 if (pVCpu->iem.s.cActiveMappings == 0)
8294 {
8295 pVCpu->iem.s.iNextMapping = 1;
8296 return 0;
8297 }
8298
8299 /* There should be enough mappings for all instructions. */
8300 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8301
8302 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8303 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8304 return i;
8305
8306 AssertFailedReturn(1024);
8307}
8308
8309
8310/**
8311 * Commits a bounce buffer that needs writing back and unmaps it.
8312 *
8313 * @returns Strict VBox status code.
8314 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8315 * @param iMemMap The index of the buffer to commit.
8316 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8317 * Always false in ring-3, obviously.
8318 */
8319IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8320{
8321 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8322 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8323#ifdef IN_RING3
8324 Assert(!fPostponeFail);
8325 RT_NOREF_PV(fPostponeFail);
8326#endif
8327
8328 /*
8329 * Do the writing.
8330 */
8331 PVM pVM = pVCpu->CTX_SUFF(pVM);
8332 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8333 {
8334 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8335 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8336 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8337 if (!pVCpu->iem.s.fBypassHandlers)
8338 {
8339 /*
8340 * Carefully and efficiently dealing with access handler return
8341 * codes makes this a little bloated.
8342 */
8343 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8344 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8345 pbBuf,
8346 cbFirst,
8347 PGMACCESSORIGIN_IEM);
8348 if (rcStrict == VINF_SUCCESS)
8349 {
8350 if (cbSecond)
8351 {
8352 rcStrict = PGMPhysWrite(pVM,
8353 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8354 pbBuf + cbFirst,
8355 cbSecond,
8356 PGMACCESSORIGIN_IEM);
8357 if (rcStrict == VINF_SUCCESS)
8358 { /* nothing */ }
8359 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8360 {
8361 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8362 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8363 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8364 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8365 }
8366#ifndef IN_RING3
8367 else if (fPostponeFail)
8368 {
8369 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8370 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8371 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8372 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8373 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8374 return iemSetPassUpStatus(pVCpu, rcStrict);
8375 }
8376#endif
8377 else
8378 {
8379 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8380 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8381 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8382 return rcStrict;
8383 }
8384 }
8385 }
8386 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8387 {
8388 if (!cbSecond)
8389 {
8390 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8391 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8392 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8393 }
8394 else
8395 {
8396 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8397 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8398 pbBuf + cbFirst,
8399 cbSecond,
8400 PGMACCESSORIGIN_IEM);
8401 if (rcStrict2 == VINF_SUCCESS)
8402 {
8403 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8404 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8405 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8406 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8407 }
8408 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8409 {
8410 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8411 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8412 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8413 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8414 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8415 }
8416#ifndef IN_RING3
8417 else if (fPostponeFail)
8418 {
8419 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8420 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8421 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8422 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8423 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8424 return iemSetPassUpStatus(pVCpu, rcStrict);
8425 }
8426#endif
8427 else
8428 {
8429 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8430 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8431 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8432 return rcStrict2;
8433 }
8434 }
8435 }
8436#ifndef IN_RING3
8437 else if (fPostponeFail)
8438 {
8439 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8440 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8441 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8442 if (!cbSecond)
8443 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8444 else
8445 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8446 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8447 return iemSetPassUpStatus(pVCpu, rcStrict);
8448 }
8449#endif
8450 else
8451 {
8452 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8453 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8454 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8455 return rcStrict;
8456 }
8457 }
8458 else
8459 {
8460 /*
8461 * No access handlers, much simpler.
8462 */
8463 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8464 if (RT_SUCCESS(rc))
8465 {
8466 if (cbSecond)
8467 {
8468 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8469 if (RT_SUCCESS(rc))
8470 { /* likely */ }
8471 else
8472 {
8473 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8474 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8475 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8476 return rc;
8477 }
8478 }
8479 }
8480 else
8481 {
8482 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8483 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8484 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8485 return rc;
8486 }
8487 }
8488 }
8489
8490#if defined(IEM_LOG_MEMORY_WRITES)
8491 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8492 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8493 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8494 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8495 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8496 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8497
8498 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8499 g_cbIemWrote = cbWrote;
8500 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8501#endif
8502
8503 /*
8504 * Free the mapping entry.
8505 */
8506 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8507 Assert(pVCpu->iem.s.cActiveMappings != 0);
8508 pVCpu->iem.s.cActiveMappings--;
8509 return VINF_SUCCESS;
8510}
8511
8512
8513/**
8514 * iemMemMap worker that deals with a request crossing pages.
8515 */
8516IEM_STATIC VBOXSTRICTRC
8517iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8518{
8519 /*
8520 * Do the address translations.
8521 */
8522 RTGCPHYS GCPhysFirst;
8523 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8524 if (rcStrict != VINF_SUCCESS)
8525 return rcStrict;
8526
8527 RTGCPHYS GCPhysSecond;
8528 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8529 fAccess, &GCPhysSecond);
8530 if (rcStrict != VINF_SUCCESS)
8531 return rcStrict;
8532 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8533
8534 PVM pVM = pVCpu->CTX_SUFF(pVM);
8535
8536 /*
8537 * Read in the current memory content if it's a read, execute or partial
8538 * write access.
8539 */
8540 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8541 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8542 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8543
8544 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8545 {
8546 if (!pVCpu->iem.s.fBypassHandlers)
8547 {
8548 /*
8549 * Must carefully deal with access handler status codes here,
8550 * makes the code a bit bloated.
8551 */
8552 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8553 if (rcStrict == VINF_SUCCESS)
8554 {
8555 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8556 if (rcStrict == VINF_SUCCESS)
8557 { /*likely */ }
8558 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8559 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8560 else
8561 {
8562 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8563 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8564 return rcStrict;
8565 }
8566 }
8567 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8568 {
8569 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8570 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8571 {
8572 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8573 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8574 }
8575 else
8576 {
8577 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8578 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8579 return rcStrict2;
8580 }
8581 }
8582 else
8583 {
8584 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8585 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8586 return rcStrict;
8587 }
8588 }
8589 else
8590 {
8591 /*
8592 * No informational status codes here, much more straightforward.
8593 */
8594 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8595 if (RT_SUCCESS(rc))
8596 {
8597 Assert(rc == VINF_SUCCESS);
8598 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8599 if (RT_SUCCESS(rc))
8600 Assert(rc == VINF_SUCCESS);
8601 else
8602 {
8603 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8604 return rc;
8605 }
8606 }
8607 else
8608 {
8609 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8610 return rc;
8611 }
8612 }
8613 }
8614#ifdef VBOX_STRICT
8615 else
8616 memset(pbBuf, 0xcc, cbMem);
8617 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8618 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8619#endif
8620
8621 /*
8622 * Commit the bounce buffer entry.
8623 */
8624 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8625 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8626 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8627 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8628 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8629 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8630 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8631 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8632 pVCpu->iem.s.cActiveMappings++;
8633
8634 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8635 *ppvMem = pbBuf;
8636 return VINF_SUCCESS;
8637}
8638
8639
8640/**
8641 * iemMemMap worker that deals with iemMemPageMap failures.
8642 */
8643IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8644 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8645{
8646 /*
8647 * Filter out conditions we can handle and the ones which shouldn't happen.
8648 */
8649 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8650 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8651 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8652 {
8653 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8654 return rcMap;
8655 }
8656 pVCpu->iem.s.cPotentialExits++;
8657
8658 /*
8659 * Read in the current memory content if it's a read, execute or partial
8660 * write access.
8661 */
8662 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8663 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8664 {
8665 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8666 memset(pbBuf, 0xff, cbMem);
8667 else
8668 {
8669 int rc;
8670 if (!pVCpu->iem.s.fBypassHandlers)
8671 {
8672 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8673 if (rcStrict == VINF_SUCCESS)
8674 { /* nothing */ }
8675 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8676 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8677 else
8678 {
8679 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8680 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8681 return rcStrict;
8682 }
8683 }
8684 else
8685 {
8686 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8687 if (RT_SUCCESS(rc))
8688 { /* likely */ }
8689 else
8690 {
8691 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8692 GCPhysFirst, rc));
8693 return rc;
8694 }
8695 }
8696 }
8697 }
8698#ifdef VBOX_STRICT
8699 else
8700 memset(pbBuf, 0xcc, cbMem);
8701#endif
8702#ifdef VBOX_STRICT
8703 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8704 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8705#endif
8706
8707 /*
8708 * Commit the bounce buffer entry.
8709 */
8710 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8711 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8712 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8713 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8714 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8715 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8716 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8717 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8718 pVCpu->iem.s.cActiveMappings++;
8719
8720 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8721 *ppvMem = pbBuf;
8722 return VINF_SUCCESS;
8723}
8724
8725
8726
8727/**
8728 * Maps the specified guest memory for the given kind of access.
8729 *
8730 * This may be using bounce buffering of the memory if it's crossing a page
8731 * boundary or if there is an access handler installed for any of it. Because
8732 * of lock prefix guarantees, we're in for some extra clutter when this
8733 * happens.
8734 *
8735 * This may raise a \#GP, \#SS, \#PF or \#AC.
8736 *
8737 * @returns VBox strict status code.
8738 *
8739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8740 * @param ppvMem Where to return the pointer to the mapped
8741 * memory.
8742 * @param cbMem The number of bytes to map. This is usually 1,
8743 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8744 * string operations it can be up to a page.
8745 * @param iSegReg The index of the segment register to use for
8746 * this access. The base and limits are checked.
8747 * Use UINT8_MAX to indicate that no segmentation
8748 * is required (for IDT, GDT and LDT accesses).
8749 * @param GCPtrMem The address of the guest memory.
8750 * @param fAccess How the memory is being accessed. The
8751 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8752 * how to map the memory, while the
8753 * IEM_ACCESS_WHAT_XXX bit is used when raising
8754 * exceptions.
8755 */
8756IEM_STATIC VBOXSTRICTRC
8757iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8758{
8759 /*
8760 * Check the input and figure out which mapping entry to use.
8761 */
8762 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8763 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8764 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8765
8766 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8767 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8768 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8769 {
8770 iMemMap = iemMemMapFindFree(pVCpu);
8771 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8772 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8773 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8774 pVCpu->iem.s.aMemMappings[2].fAccess),
8775 VERR_IEM_IPE_9);
8776 }
8777
8778 /*
8779 * Map the memory, checking that we can actually access it. If something
8780 * slightly complicated happens, fall back on bounce buffering.
8781 */
8782 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8783 if (rcStrict != VINF_SUCCESS)
8784 return rcStrict;
8785
8786 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8787 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8788
8789 RTGCPHYS GCPhysFirst;
8790 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8791 if (rcStrict != VINF_SUCCESS)
8792 return rcStrict;
8793
8794 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8795 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8796 if (fAccess & IEM_ACCESS_TYPE_READ)
8797 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8798
8799 void *pvMem;
8800 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8801 if (rcStrict != VINF_SUCCESS)
8802 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8803
8804 /*
8805 * Fill in the mapping table entry.
8806 */
8807 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8808 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8809 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8810 pVCpu->iem.s.cActiveMappings++;
8811
8812 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8813 *ppvMem = pvMem;
8814 return VINF_SUCCESS;
8815}
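/*
 * Usage sketch (added for clarity, not in the original source; it mirrors the
 * iemMemFetchDataUxx helpers further down, but for a write).  The expected
 * calling pattern is map, access, then commit-and-unmap, e.g. for a dword
 * write through DS:
 *
 *     uint32_t *pu32Dst;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                       X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu32Dst = u32Value;
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *     }
 *     return rcStrict;
 *
 * Here GCPtrMem and u32Value stand in for the caller's effective address and
 * the value being stored.
 */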
8816
8817
8818/**
8819 * Commits the guest memory if bounce buffered and unmaps it.
8820 *
8821 * @returns Strict VBox status code.
8822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8823 * @param pvMem The mapping.
8824 * @param fAccess The kind of access.
8825 */
8826IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8827{
8828 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8829 AssertReturn(iMemMap >= 0, iMemMap);
8830
8831 /* If it's bounce buffered, we may need to write back the buffer. */
8832 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8833 {
8834 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8835 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8836 }
8837 /* Otherwise unlock it. */
8838 else
8839 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8840
8841 /* Free the entry. */
8842 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8843 Assert(pVCpu->iem.s.cActiveMappings != 0);
8844 pVCpu->iem.s.cActiveMappings--;
8845 return VINF_SUCCESS;
8846}
8847
8848#ifdef IEM_WITH_SETJMP
8849
8850/**
8851 * Maps the specified guest memory for the given kind of access, longjmp on
8852 * error.
8853 *
8854 * This may be using bounce buffering of the memory if it's crossing a page
8855 * boundary or if there is an access handler installed for any of it. Because
8856 * of lock prefix guarantees, we're in for some extra clutter when this
8857 * happens.
8858 *
8859 * This may raise a \#GP, \#SS, \#PF or \#AC.
8860 *
8861 * @returns Pointer to the mapped memory.
8862 *
8863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8864 * @param cbMem The number of bytes to map. This is usually 1,
8865 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8866 * string operations it can be up to a page.
8867 * @param iSegReg The index of the segment register to use for
8868 * this access. The base and limits are checked.
8869 * Use UINT8_MAX to indicate that no segmentation
8870 * is required (for IDT, GDT and LDT accesses).
8871 * @param GCPtrMem The address of the guest memory.
8872 * @param fAccess How the memory is being accessed. The
8873 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8874 * how to map the memory, while the
8875 * IEM_ACCESS_WHAT_XXX bit is used when raising
8876 * exceptions.
8877 */
8878IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8879{
8880 /*
8881 * Check the input and figure out which mapping entry to use.
8882 */
8883 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8884 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8885 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8886
8887 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8888 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8889 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8890 {
8891 iMemMap = iemMemMapFindFree(pVCpu);
8892 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8893 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8894 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8895 pVCpu->iem.s.aMemMappings[2].fAccess),
8896 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8897 }
8898
8899 /*
8900 * Map the memory, checking that we can actually access it. If something
8901 * slightly complicated happens, fall back on bounce buffering.
8902 */
8903 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8904 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8905 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8906
8907 /* Crossing a page boundary? */
8908 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8909 { /* No (likely). */ }
8910 else
8911 {
8912 void *pvMem;
8913 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8914 if (rcStrict == VINF_SUCCESS)
8915 return pvMem;
8916 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8917 }
8918
8919 RTGCPHYS GCPhysFirst;
8920 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8921 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8922 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8923
8924 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8925 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8926 if (fAccess & IEM_ACCESS_TYPE_READ)
8927 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8928
8929 void *pvMem;
8930 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8931 if (rcStrict == VINF_SUCCESS)
8932 { /* likely */ }
8933 else
8934 {
8935 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8936 if (rcStrict == VINF_SUCCESS)
8937 return pvMem;
8938 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8939 }
8940
8941 /*
8942 * Fill in the mapping table entry.
8943 */
8944 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8945 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8946 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8947 pVCpu->iem.s.cActiveMappings++;
8948
8949 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8950 return pvMem;
8951}
8952
8953
8954/**
8955 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8956 *
8957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8958 * @param pvMem The mapping.
8959 * @param fAccess The kind of access.
8960 */
8961IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8962{
8963 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8964 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8965
8966 /* If it's bounce buffered, we may need to write back the buffer. */
8967 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8968 {
8969 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8970 {
8971 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8972 if (rcStrict == VINF_SUCCESS)
8973 return;
8974 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8975 }
8976 }
8977 /* Otherwise unlock it. */
8978 else
8979 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8980
8981 /* Free the entry. */
8982 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8983 Assert(pVCpu->iem.s.cActiveMappings != 0);
8984 pVCpu->iem.s.cActiveMappings--;
8985}
8986
8987#endif /* IEM_WITH_SETJMP */
8988
8989#ifndef IN_RING3
8990/**
8991 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8992 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
8993 *
8994 * Allows the instruction to be completed and retired, while the IEM user will
8995 * return to ring-3 immediately afterwards and do the postponed writes there.
8996 *
8997 * @returns VBox status code (no strict statuses). Caller must check
8998 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8999 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9000 * @param pvMem The mapping.
9001 * @param fAccess The kind of access.
9002 */
9003IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9004{
9005 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9006 AssertReturn(iMemMap >= 0, iMemMap);
9007
9008 /* If it's bounce buffered, we may need to write back the buffer. */
9009 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9010 {
9011 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9012 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9013 }
9014 /* Otherwise unlock it. */
9015 else
9016 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9017
9018 /* Free the entry. */
9019 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9020 Assert(pVCpu->iem.s.cActiveMappings != 0);
9021 pVCpu->iem.s.cActiveMappings--;
9022 return VINF_SUCCESS;
9023}
9024#endif
9025
9026
9027/**
9028 * Rolls back mappings, releasing page locks and such.
9029 *
9030 * The caller shall only call this after checking cActiveMappings.
9031 *
9033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9034 */
9035IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9036{
9037 Assert(pVCpu->iem.s.cActiveMappings > 0);
9038
9039 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9040 while (iMemMap-- > 0)
9041 {
9042 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9043 if (fAccess != IEM_ACCESS_INVALID)
9044 {
9045 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9046 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9047 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9048 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9049 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9050 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9051 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9052 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9053 pVCpu->iem.s.cActiveMappings--;
9054 }
9055 }
9056}
9057
9058
9059/**
9060 * Fetches a data byte.
9061 *
9062 * @returns Strict VBox status code.
9063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9064 * @param pu8Dst Where to return the byte.
9065 * @param iSegReg The index of the segment register to use for
9066 * this access. The base and limits are checked.
9067 * @param GCPtrMem The address of the guest memory.
9068 */
9069IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9070{
9071 /* The lazy approach for now... */
9072 uint8_t const *pu8Src;
9073 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9074 if (rc == VINF_SUCCESS)
9075 {
9076 *pu8Dst = *pu8Src;
9077 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9078 }
9079 return rc;
9080}
9081
9082
9083#ifdef IEM_WITH_SETJMP
9084/**
9085 * Fetches a data byte, longjmp on error.
9086 *
9087 * @returns The byte.
9088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9089 * @param iSegReg The index of the segment register to use for
9090 * this access. The base and limits are checked.
9091 * @param GCPtrMem The address of the guest memory.
9092 */
9093DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9094{
9095 /* The lazy approach for now... */
9096 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9097 uint8_t const bRet = *pu8Src;
9098 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9099 return bRet;
9100}
9101#endif /* IEM_WITH_SETJMP */
9102
9103
9104/**
9105 * Fetches a data word.
9106 *
9107 * @returns Strict VBox status code.
9108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9109 * @param pu16Dst Where to return the word.
9110 * @param iSegReg The index of the segment register to use for
9111 * this access. The base and limits are checked.
9112 * @param GCPtrMem The address of the guest memory.
9113 */
9114IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9115{
9116 /* The lazy approach for now... */
9117 uint16_t const *pu16Src;
9118 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9119 if (rc == VINF_SUCCESS)
9120 {
9121 *pu16Dst = *pu16Src;
9122 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9123 }
9124 return rc;
9125}
9126
9127
9128#ifdef IEM_WITH_SETJMP
9129/**
9130 * Fetches a data word, longjmp on error.
9131 *
9132 * @returns The word.
9133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9134 * @param iSegReg The index of the segment register to use for
9135 * this access. The base and limits are checked.
9136 * @param GCPtrMem The address of the guest memory.
9137 */
9138DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9139{
9140 /* The lazy approach for now... */
9141 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9142 uint16_t const u16Ret = *pu16Src;
9143 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9144 return u16Ret;
9145}
9146#endif
9147
9148
9149/**
9150 * Fetches a data dword.
9151 *
9152 * @returns Strict VBox status code.
9153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9154 * @param pu32Dst Where to return the dword.
9155 * @param iSegReg The index of the segment register to use for
9156 * this access. The base and limits are checked.
9157 * @param GCPtrMem The address of the guest memory.
9158 */
9159IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9160{
9161 /* The lazy approach for now... */
9162 uint32_t const *pu32Src;
9163 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9164 if (rc == VINF_SUCCESS)
9165 {
9166 *pu32Dst = *pu32Src;
9167 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9168 }
9169 return rc;
9170}
9171
9172
9173#ifdef IEM_WITH_SETJMP
9174
9175IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9176{
9177 Assert(cbMem >= 1);
9178 Assert(iSegReg < X86_SREG_COUNT);
9179
9180 /*
9181 * 64-bit mode is simpler.
9182 */
9183 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9184 {
9185 if (iSegReg >= X86_SREG_FS)
9186 {
9187 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9188 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9189 GCPtrMem += pSel->u64Base;
9190 }
9191
9192 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9193 return GCPtrMem;
9194 }
9195 /*
9196 * 16-bit and 32-bit segmentation.
9197 */
9198 else
9199 {
9200 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9201 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9202 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9203 == X86DESCATTR_P /* data, expand up */
9204 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9205 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9206 {
9207 /* expand up */
9208 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9209 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9210 && GCPtrLast32 > (uint32_t)GCPtrMem))
9211 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9212 }
9213 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9214 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9215 {
9216 /* expand down */
9217 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9218 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9219 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9220 && GCPtrLast32 > (uint32_t)GCPtrMem))
9221 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9222 }
9223 else
9224 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9225 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9226 }
9227 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9228}
9229
9230
9231IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9232{
9233 Assert(cbMem >= 1);
9234 Assert(iSegReg < X86_SREG_COUNT);
9235
9236 /*
9237 * 64-bit mode is simpler.
9238 */
9239 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9240 {
9241 if (iSegReg >= X86_SREG_FS)
9242 {
9243 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9244 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9245 GCPtrMem += pSel->u64Base;
9246 }
9247
9248 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9249 return GCPtrMem;
9250 }
9251 /*
9252 * 16-bit and 32-bit segmentation.
9253 */
9254 else
9255 {
9256 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9257 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9258 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9259 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9260 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9261 {
9262 /* expand up */
9263 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9264 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9265 && GCPtrLast32 > (uint32_t)GCPtrMem))
9266 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9267 }
9268 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9269 {
9270 /* expand down */
9271 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9272 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9273 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9274 && GCPtrLast32 > (uint32_t)GCPtrMem))
9275 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9276 }
9277 else
9278 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9279 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9280 }
9281 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9282}
9283
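/*
 * Illustrative, self-contained restatement of the 16-bit/32-bit limit checks
 * performed by the two functions above (the helper name and signature are
 * hypothetical, not part of IEM): expand-up segments accept offsets in
 * [0, limit]; expand-down segments accept offsets in (limit, 0xffff or
 * 0xffffffff depending on the D/B bit]; any 32-bit wrap-around is out of bounds.
 */
static bool sketchSegLimitCheckOk(bool fExpandDown, bool fDefBig, uint32_t uLimit,
                                  uint32_t offFirst, uint32_t cbAcc /* >= 1 */)
{
    uint32_t const offLast = offFirst + cbAcc - 1;
    if (offLast < offFirst)                     /* wrap-around */
        return false;
    if (!fExpandDown)
        return offLast <= uLimit;               /* expand up: [0, limit] */
    uint32_t const uUpper = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > uLimit && offLast <= uUpper; /* expand down: (limit, upper] */
}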
9284
9285/**
9286 * Fetches a data dword, longjmp on error, fallback/safe version.
9287 *
9288 * @returns The dword
9289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9290 * @param iSegReg The index of the segment register to use for
9291 * this access. The base and limits are checked.
9292 * @param GCPtrMem The address of the guest memory.
9293 */
9294IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9295{
9296 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9297 uint32_t const u32Ret = *pu32Src;
9298 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9299 return u32Ret;
9300}
9301
9302
9303/**
9304 * Fetches a data dword, longjmp on error.
9305 *
9306 * @returns The dword
9307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9308 * @param iSegReg The index of the segment register to use for
9309 * this access. The base and limits are checked.
9310 * @param GCPtrMem The address of the guest memory.
9311 */
9312DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9313{
9314# ifdef IEM_WITH_DATA_TLB
9315 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9316 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9317 {
9318 /// @todo more later.
9319 }
9320
9321 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9322# else
9323 /* The lazy approach. */
9324 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9325 uint32_t const u32Ret = *pu32Src;
9326 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9327 return u32Ret;
9328# endif
9329}
9330#endif
9331
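/*
 * Illustrative sketch of the setjmp/longjmp error funnel that the *Jmp
 * accessors above rely on. Everything below is hypothetical stand-in code:
 * g_SketchJmpBuf plays the role of the frame the real code reaches through
 * pVCpu->iem.s.CTX_SUFF(pJmpBuf), and the status values are placeholders.
 */
static jmp_buf g_SketchJmpBuf;

static uint32_t sketchFetchU32OrLongjmp(uint32_t const *pu32Mem, int rcFault)
{
    if (!pu32Mem)                               /* pretend the mapping/translation failed */
        longjmp(g_SketchJmpBuf, rcFault);
    return *pu32Mem;                            /* success: just return the value */
}

static int sketchRunOne(uint32_t const *pu32Mem, uint32_t *pu32Dst)
{
    int rc;
    if ((rc = setjmp(g_SketchJmpBuf)) == 0)     /* arm the frame before executing */
    {
        *pu32Dst = sketchFetchU32OrLongjmp(pu32Mem, -1 /* stand-in error status */);
        return 0;                               /* stand-in for VINF_SUCCESS */
    }
    return rc;                                  /* status delivered via longjmp */
}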
9332
9333#ifdef SOME_UNUSED_FUNCTION
9334/**
9335 * Fetches a data dword and sign extends it to a qword.
9336 *
9337 * @returns Strict VBox status code.
9338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9339 * @param pu64Dst Where to return the sign extended value.
9340 * @param iSegReg The index of the segment register to use for
9341 * this access. The base and limits are checked.
9342 * @param GCPtrMem The address of the guest memory.
9343 */
9344IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9345{
9346 /* The lazy approach for now... */
9347 int32_t const *pi32Src;
9348 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9349 if (rc == VINF_SUCCESS)
9350 {
9351 *pu64Dst = *pi32Src;
9352 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9353 }
9354#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9355 else
9356 *pu64Dst = 0;
9357#endif
9358 return rc;
9359}
9360#endif
9361
9362
9363/**
9364 * Fetches a data qword.
9365 *
9366 * @returns Strict VBox status code.
9367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9368 * @param pu64Dst Where to return the qword.
9369 * @param iSegReg The index of the segment register to use for
9370 * this access. The base and limits are checked.
9371 * @param GCPtrMem The address of the guest memory.
9372 */
9373IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9374{
9375 /* The lazy approach for now... */
9376 uint64_t const *pu64Src;
9377 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9378 if (rc == VINF_SUCCESS)
9379 {
9380 *pu64Dst = *pu64Src;
9381 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9382 }
9383 return rc;
9384}
9385
9386
9387#ifdef IEM_WITH_SETJMP
9388/**
9389 * Fetches a data qword, longjmp on error.
9390 *
9391 * @returns The qword.
9392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9393 * @param iSegReg The index of the segment register to use for
9394 * this access. The base and limits are checked.
9395 * @param GCPtrMem The address of the guest memory.
9396 */
9397DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9398{
9399 /* The lazy approach for now... */
9400 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9401 uint64_t const u64Ret = *pu64Src;
9402 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9403 return u64Ret;
9404}
9405#endif
9406
9407
9408/**
9409 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9410 *
9411 * @returns Strict VBox status code.
9412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9413 * @param pu64Dst Where to return the qword.
9414 * @param iSegReg The index of the segment register to use for
9415 * this access. The base and limits are checked.
9416 * @param GCPtrMem The address of the guest memory.
9417 */
9418IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9419{
9420 /* The lazy approach for now... */
9421 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9422 if (RT_UNLIKELY(GCPtrMem & 15))
9423 return iemRaiseGeneralProtectionFault0(pVCpu);
9424
9425 uint64_t const *pu64Src;
9426 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9427 if (rc == VINF_SUCCESS)
9428 {
9429 *pu64Dst = *pu64Src;
9430 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9431 }
9432 return rc;
9433}
9434
9435
9436#ifdef IEM_WITH_SETJMP
9437/**
9438 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9439 *
9440 * @returns The qword.
9441 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9442 * @param iSegReg The index of the segment register to use for
9443 * this access. The base and limits are checked.
9444 * @param GCPtrMem The address of the guest memory.
9445 */
9446DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9447{
9448 /* The lazy approach for now... */
9449 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9450 if (RT_LIKELY(!(GCPtrMem & 15)))
9451 {
9452 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9453 uint64_t const u64Ret = *pu64Src;
9454 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9455 return u64Ret;
9456 }
9457
9458 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9459 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9460}
9461#endif
9462
9463
9464/**
9465 * Fetches a data tword.
9466 *
9467 * @returns Strict VBox status code.
9468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9469 * @param pr80Dst Where to return the tword.
9470 * @param iSegReg The index of the segment register to use for
9471 * this access. The base and limits are checked.
9472 * @param GCPtrMem The address of the guest memory.
9473 */
9474IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9475{
9476 /* The lazy approach for now... */
9477 PCRTFLOAT80U pr80Src;
9478 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9479 if (rc == VINF_SUCCESS)
9480 {
9481 *pr80Dst = *pr80Src;
9482 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9483 }
9484 return rc;
9485}
9486
9487
9488#ifdef IEM_WITH_SETJMP
9489/**
9490 * Fetches a data tword, longjmp on error.
9491 *
9492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9493 * @param pr80Dst Where to return the tword.
9494 * @param iSegReg The index of the segment register to use for
9495 * this access. The base and limits are checked.
9496 * @param GCPtrMem The address of the guest memory.
9497 */
9498DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9499{
9500 /* The lazy approach for now... */
9501 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9502 *pr80Dst = *pr80Src;
9503 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9504}
9505#endif
9506
9507
9508/**
9509 * Fetches a data dqword (double qword), generally SSE related.
9510 *
9511 * @returns Strict VBox status code.
9512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9513 * @param pu128Dst Where to return the dqword.
9514 * @param iSegReg The index of the segment register to use for
9515 * this access. The base and limits are checked.
9516 * @param GCPtrMem The address of the guest memory.
9517 */
9518IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9519{
9520 /* The lazy approach for now... */
9521 PCRTUINT128U pu128Src;
9522 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9523 if (rc == VINF_SUCCESS)
9524 {
9525 pu128Dst->au64[0] = pu128Src->au64[0];
9526 pu128Dst->au64[1] = pu128Src->au64[1];
9527 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9528 }
9529 return rc;
9530}
9531
9532
9533#ifdef IEM_WITH_SETJMP
9534/**
9535 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9536 *
9537 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9538 * @param pu128Dst Where to return the dqword.
9539 * @param iSegReg The index of the segment register to use for
9540 * this access. The base and limits are checked.
9541 * @param GCPtrMem The address of the guest memory.
9542 */
9543IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9544{
9545 /* The lazy approach for now... */
9546 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9547 pu128Dst->au64[0] = pu128Src->au64[0];
9548 pu128Dst->au64[1] = pu128Src->au64[1];
9549 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9550}
9551#endif
9552
9553
9554/**
9555 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9556 * related.
9557 *
9558 * Raises \#GP(0) if not aligned.
9559 *
9560 * @returns Strict VBox status code.
9561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9562 * @param pu128Dst Where to return the dqword.
9563 * @param iSegReg The index of the segment register to use for
9564 * this access. The base and limits are checked.
9565 * @param GCPtrMem The address of the guest memory.
9566 */
9567IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9568{
9569 /* The lazy approach for now... */
9570 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9571 if ( (GCPtrMem & 15)
9572 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9573 return iemRaiseGeneralProtectionFault0(pVCpu);
9574
9575 PCRTUINT128U pu128Src;
9576 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9577 if (rc == VINF_SUCCESS)
9578 {
9579 pu128Dst->au64[0] = pu128Src->au64[0];
9580 pu128Dst->au64[1] = pu128Src->au64[1];
9581 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9582 }
9583 return rc;
9584}
9585
9586
9587#ifdef IEM_WITH_SETJMP
9588/**
9589 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9590 * related, longjmp on error.
9591 *
9592 * Raises \#GP(0) if not aligned.
9593 *
9594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9595 * @param pu128Dst Where to return the dqword.
9596 * @param iSegReg The index of the segment register to use for
9597 * this access. The base and limits are checked.
9598 * @param GCPtrMem The address of the guest memory.
9599 */
9600DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9601{
9602 /* The lazy approach for now... */
9603 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9604 if ( (GCPtrMem & 15) == 0
9605 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9606 {
9607 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9608 pu128Dst->au64[0] = pu128Src->au64[0];
9609 pu128Dst->au64[1] = pu128Src->au64[1];
9610 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9611 return;
9612 }
9613
9614 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9615 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9616}
9617#endif
9618
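/*
 * The alignment rule used by the two aligned-SSE fetchers above, restated as a
 * tiny hypothetical predicate: a misaligned 16-byte access only raises #GP(0)
 * when MXCSR.MM (the misaligned exception mask) is clear.
 */
static bool sketchSseMisalignFaults(RTGCPTR GCPtrMem, uint32_t fMxCsr)
{
    return (GCPtrMem & 15) != 0
        && !(fMxCsr & X86_MXCSR_MM);
}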
9619
9620/**
9621 * Fetches a data oword (octo word), generally AVX related.
9622 *
9623 * @returns Strict VBox status code.
9624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9625 * @param pu256Dst Where to return the oword.
9626 * @param iSegReg The index of the segment register to use for
9627 * this access. The base and limits are checked.
9628 * @param GCPtrMem The address of the guest memory.
9629 */
9630IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9631{
9632 /* The lazy approach for now... */
9633 PCRTUINT256U pu256Src;
9634 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9635 if (rc == VINF_SUCCESS)
9636 {
9637 pu256Dst->au64[0] = pu256Src->au64[0];
9638 pu256Dst->au64[1] = pu256Src->au64[1];
9639 pu256Dst->au64[2] = pu256Src->au64[2];
9640 pu256Dst->au64[3] = pu256Src->au64[3];
9641 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9642 }
9643 return rc;
9644}
9645
9646
9647#ifdef IEM_WITH_SETJMP
9648/**
9649 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9650 *
9651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9652 * @param pu256Dst Where to return the oword.
9653 * @param iSegReg The index of the segment register to use for
9654 * this access. The base and limits are checked.
9655 * @param GCPtrMem The address of the guest memory.
9656 */
9657IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9658{
9659 /* The lazy approach for now... */
9660 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9661 pu256Dst->au64[0] = pu256Src->au64[0];
9662 pu256Dst->au64[1] = pu256Src->au64[1];
9663 pu256Dst->au64[2] = pu256Src->au64[2];
9664 pu256Dst->au64[3] = pu256Src->au64[3];
9665 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9666}
9667#endif
9668
9669
9670/**
9671 * Fetches a data oword (octo word) at an aligned address, generally AVX
9672 * related.
9673 *
9674 * Raises \#GP(0) if not aligned.
9675 *
9676 * @returns Strict VBox status code.
9677 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9678 * @param pu256Dst Where to return the oword.
9679 * @param iSegReg The index of the segment register to use for
9680 * this access. The base and limits are checked.
9681 * @param GCPtrMem The address of the guest memory.
9682 */
9683IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9684{
9685 /* The lazy approach for now... */
9686 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9687 if (GCPtrMem & 31)
9688 return iemRaiseGeneralProtectionFault0(pVCpu);
9689
9690 PCRTUINT256U pu256Src;
9691 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9692 if (rc == VINF_SUCCESS)
9693 {
9694 pu256Dst->au64[0] = pu256Src->au64[0];
9695 pu256Dst->au64[1] = pu256Src->au64[1];
9696 pu256Dst->au64[2] = pu256Src->au64[2];
9697 pu256Dst->au64[3] = pu256Src->au64[3];
9698 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9699 }
9700 return rc;
9701}
9702
9703
9704#ifdef IEM_WITH_SETJMP
9705/**
9706 * Fetches a data oword (octo word) at an aligned address, generally AVX
9707 * related, longjmp on error.
9708 *
9709 * Raises \#GP(0) if not aligned.
9710 *
9711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9712 * @param pu256Dst Where to return the oword.
9713 * @param iSegReg The index of the segment register to use for
9714 * this access. The base and limits are checked.
9715 * @param GCPtrMem The address of the guest memory.
9716 */
9717DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9718{
9719 /* The lazy approach for now... */
9720 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9721 if ((GCPtrMem & 31) == 0)
9722 {
9723 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9724 pu256Dst->au64[0] = pu256Src->au64[0];
9725 pu256Dst->au64[1] = pu256Src->au64[1];
9726 pu256Dst->au64[2] = pu256Src->au64[2];
9727 pu256Dst->au64[3] = pu256Src->au64[3];
9728 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9729 return;
9730 }
9731
9732 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9733 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9734}
9735#endif
9736
9737
9738
9739/**
9740 * Fetches a descriptor register (lgdt, lidt).
9741 *
9742 * @returns Strict VBox status code.
9743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9744 * @param pcbLimit Where to return the limit.
9745 * @param pGCPtrBase Where to return the base.
9746 * @param iSegReg The index of the segment register to use for
9747 * this access. The base and limits are checked.
9748 * @param GCPtrMem The address of the guest memory.
9749 * @param enmOpSize The effective operand size.
9750 */
9751IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9752 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9753{
9754 /*
9755 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9756 * little special:
9757 * - The two reads are done separately.
9758 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9759 * - We suspect the 386 to actually commit the limit before the base in
9760 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9761 * don't try to emulate this eccentric behavior, because it's not well
9762 * enough understood and rather hard to trigger.
9763 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9764 */
9765 VBOXSTRICTRC rcStrict;
9766 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9767 {
9768 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9769 if (rcStrict == VINF_SUCCESS)
9770 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9771 }
9772 else
9773 {
9774 uint32_t uTmp = 0; /* (Silence the Visual C++ 'maybe used uninitialized' warning.) */
9775 if (enmOpSize == IEMMODE_32BIT)
9776 {
9777 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9778 {
9779 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9780 if (rcStrict == VINF_SUCCESS)
9781 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9782 }
9783 else
9784 {
9785 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9786 if (rcStrict == VINF_SUCCESS)
9787 {
9788 *pcbLimit = (uint16_t)uTmp;
9789 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9790 }
9791 }
9792 if (rcStrict == VINF_SUCCESS)
9793 *pGCPtrBase = uTmp;
9794 }
9795 else
9796 {
9797 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9798 if (rcStrict == VINF_SUCCESS)
9799 {
9800 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9801 if (rcStrict == VINF_SUCCESS)
9802 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9803 }
9804 }
9805 }
9806 return rcStrict;
9807}
9808
9809
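/*
 * Illustrative sketch of the 16-bit operand size case handled above: the limit
 * and base are read separately (word at +0, dword at +2) and only the low 24
 * bits of the base are kept. The helper below is hypothetical and works on a
 * flat byte buffer instead of guest memory.
 */
static void sketchFetchXdtr16(uint8_t const *pbMem, uint16_t *pcbLimit, uint32_t *pGCPtrBase)
{
    *pcbLimit = (uint16_t)(pbMem[0] | ((uint16_t)pbMem[1] << 8));
    uint32_t const uBase = (uint32_t)pbMem[2]
                         | ((uint32_t)pbMem[3] << 8)
                         | ((uint32_t)pbMem[4] << 16)
                         | ((uint32_t)pbMem[5] << 24);
    *pGCPtrBase = uBase & UINT32_C(0x00ffffff);  /* top byte discarded with a 16-bit operand size */
}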
9810
9811/**
9812 * Stores a data byte.
9813 *
9814 * @returns Strict VBox status code.
9815 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9816 * @param iSegReg The index of the segment register to use for
9817 * this access. The base and limits are checked.
9818 * @param GCPtrMem The address of the guest memory.
9819 * @param u8Value The value to store.
9820 */
9821IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9822{
9823 /* The lazy approach for now... */
9824 uint8_t *pu8Dst;
9825 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9826 if (rc == VINF_SUCCESS)
9827 {
9828 *pu8Dst = u8Value;
9829 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9830 }
9831 return rc;
9832}
9833
9834
9835#ifdef IEM_WITH_SETJMP
9836/**
9837 * Stores a data byte, longjmp on error.
9838 *
9839 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9840 * @param iSegReg The index of the segment register to use for
9841 * this access. The base and limits are checked.
9842 * @param GCPtrMem The address of the guest memory.
9843 * @param u8Value The value to store.
9844 */
9845IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9846{
9847 /* The lazy approach for now... */
9848 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9849 *pu8Dst = u8Value;
9850 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9851}
9852#endif
9853
9854
9855/**
9856 * Stores a data word.
9857 *
9858 * @returns Strict VBox status code.
9859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9860 * @param iSegReg The index of the segment register to use for
9861 * this access. The base and limits are checked.
9862 * @param GCPtrMem The address of the guest memory.
9863 * @param u16Value The value to store.
9864 */
9865IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9866{
9867 /* The lazy approach for now... */
9868 uint16_t *pu16Dst;
9869 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9870 if (rc == VINF_SUCCESS)
9871 {
9872 *pu16Dst = u16Value;
9873 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9874 }
9875 return rc;
9876}
9877
9878
9879#ifdef IEM_WITH_SETJMP
9880/**
9881 * Stores a data word, longjmp on error.
9882 *
9883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9884 * @param iSegReg The index of the segment register to use for
9885 * this access. The base and limits are checked.
9886 * @param GCPtrMem The address of the guest memory.
9887 * @param u16Value The value to store.
9888 */
9889IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9890{
9891 /* The lazy approach for now... */
9892 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9893 *pu16Dst = u16Value;
9894 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9895}
9896#endif
9897
9898
9899/**
9900 * Stores a data dword.
9901 *
9902 * @returns Strict VBox status code.
9903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9904 * @param iSegReg The index of the segment register to use for
9905 * this access. The base and limits are checked.
9906 * @param GCPtrMem The address of the guest memory.
9907 * @param u32Value The value to store.
9908 */
9909IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9910{
9911 /* The lazy approach for now... */
9912 uint32_t *pu32Dst;
9913 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9914 if (rc == VINF_SUCCESS)
9915 {
9916 *pu32Dst = u32Value;
9917 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9918 }
9919 return rc;
9920}
9921
9922
9923#ifdef IEM_WITH_SETJMP
9924/**
9925 * Stores a data dword, longjmp on error.
9926 *
9928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9929 * @param iSegReg The index of the segment register to use for
9930 * this access. The base and limits are checked.
9931 * @param GCPtrMem The address of the guest memory.
9932 * @param u32Value The value to store.
9933 */
9934IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9935{
9936 /* The lazy approach for now... */
9937 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9938 *pu32Dst = u32Value;
9939 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9940}
9941#endif
9942
9943
9944/**
9945 * Stores a data qword.
9946 *
9947 * @returns Strict VBox status code.
9948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9949 * @param iSegReg The index of the segment register to use for
9950 * this access. The base and limits are checked.
9951 * @param GCPtrMem The address of the guest memory.
9952 * @param u64Value The value to store.
9953 */
9954IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9955{
9956 /* The lazy approach for now... */
9957 uint64_t *pu64Dst;
9958 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9959 if (rc == VINF_SUCCESS)
9960 {
9961 *pu64Dst = u64Value;
9962 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9963 }
9964 return rc;
9965}
9966
9967
9968#ifdef IEM_WITH_SETJMP
9969/**
9970 * Stores a data qword, longjmp on error.
9971 *
9972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9973 * @param iSegReg The index of the segment register to use for
9974 * this access. The base and limits are checked.
9975 * @param GCPtrMem The address of the guest memory.
9976 * @param u64Value The value to store.
9977 */
9978IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9979{
9980 /* The lazy approach for now... */
9981 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9982 *pu64Dst = u64Value;
9983 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9984}
9985#endif
9986
9987
9988/**
9989 * Stores a data dqword.
9990 *
9991 * @returns Strict VBox status code.
9992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9993 * @param iSegReg The index of the segment register to use for
9994 * this access. The base and limits are checked.
9995 * @param GCPtrMem The address of the guest memory.
9996 * @param u128Value The value to store.
9997 */
9998IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9999{
10000 /* The lazy approach for now... */
10001 PRTUINT128U pu128Dst;
10002 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10003 if (rc == VINF_SUCCESS)
10004 {
10005 pu128Dst->au64[0] = u128Value.au64[0];
10006 pu128Dst->au64[1] = u128Value.au64[1];
10007 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10008 }
10009 return rc;
10010}
10011
10012
10013#ifdef IEM_WITH_SETJMP
10014/**
10015 * Stores a data dqword, longjmp on error.
10016 *
10017 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10018 * @param iSegReg The index of the segment register to use for
10019 * this access. The base and limits are checked.
10020 * @param GCPtrMem The address of the guest memory.
10021 * @param u128Value The value to store.
10022 */
10023IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10024{
10025 /* The lazy approach for now... */
10026 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10027 pu128Dst->au64[0] = u128Value.au64[0];
10028 pu128Dst->au64[1] = u128Value.au64[1];
10029 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10030}
10031#endif
10032
10033
10034/**
10035 * Stores a data dqword, SSE aligned.
10036 *
10037 * @returns Strict VBox status code.
10038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10039 * @param iSegReg The index of the segment register to use for
10040 * this access. The base and limits are checked.
10041 * @param GCPtrMem The address of the guest memory.
10042 * @param u128Value The value to store.
10043 */
10044IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10045{
10046 /* The lazy approach for now... */
10047 if ( (GCPtrMem & 15)
10048 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10049 return iemRaiseGeneralProtectionFault0(pVCpu);
10050
10051 PRTUINT128U pu128Dst;
10052 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10053 if (rc == VINF_SUCCESS)
10054 {
10055 pu128Dst->au64[0] = u128Value.au64[0];
10056 pu128Dst->au64[1] = u128Value.au64[1];
10057 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10058 }
10059 return rc;
10060}
10061
10062
10063#ifdef IEM_WITH_SETJMP
10064/**
10065 * Stores a data dqword, SSE aligned, longjmp on error.
10066 *
10068 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10069 * @param iSegReg The index of the segment register to use for
10070 * this access. The base and limits are checked.
10071 * @param GCPtrMem The address of the guest memory.
10072 * @param u128Value The value to store.
10073 */
10074DECL_NO_INLINE(IEM_STATIC, void)
10075iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10076{
10077 /* The lazy approach for now... */
10078 if ( (GCPtrMem & 15) == 0
10079 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10080 {
10081 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10082 pu128Dst->au64[0] = u128Value.au64[0];
10083 pu128Dst->au64[1] = u128Value.au64[1];
10084 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10085 return;
10086 }
10087
10088 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10089 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10090}
10091#endif
10092
10093
10094/**
10095 * Stores a data oword (octo word).
10096 *
10097 * @returns Strict VBox status code.
10098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10099 * @param iSegReg The index of the segment register to use for
10100 * this access. The base and limits are checked.
10101 * @param GCPtrMem The address of the guest memory.
10102 * @param pu256Value Pointer to the value to store.
10103 */
10104IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10105{
10106 /* The lazy approach for now... */
10107 PRTUINT256U pu256Dst;
10108 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10109 if (rc == VINF_SUCCESS)
10110 {
10111 pu256Dst->au64[0] = pu256Value->au64[0];
10112 pu256Dst->au64[1] = pu256Value->au64[1];
10113 pu256Dst->au64[2] = pu256Value->au64[2];
10114 pu256Dst->au64[3] = pu256Value->au64[3];
10115 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10116 }
10117 return rc;
10118}
10119
10120
10121#ifdef IEM_WITH_SETJMP
10122/**
10123 * Stores a data oword (octo word), longjmp on error.
10124 *
10125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10126 * @param iSegReg The index of the segment register to use for
10127 * this access. The base and limits are checked.
10128 * @param GCPtrMem The address of the guest memory.
10129 * @param pu256Value Pointer to the value to store.
10130 */
10131IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10132{
10133 /* The lazy approach for now... */
10134 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10135 pu256Dst->au64[0] = pu256Value->au64[0];
10136 pu256Dst->au64[1] = pu256Value->au64[1];
10137 pu256Dst->au64[2] = pu256Value->au64[2];
10138 pu256Dst->au64[3] = pu256Value->au64[3];
10139 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10140}
10141#endif
10142
10143
10144/**
10145 * Stores a data oword (octo word), AVX aligned.
10146 *
10147 * @returns Strict VBox status code.
10148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10149 * @param iSegReg The index of the segment register to use for
10150 * this access. The base and limits are checked.
10151 * @param GCPtrMem The address of the guest memory.
10152 * @param pu256Value Pointer to the value to store.
10153 */
10154IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10155{
10156 /* The lazy approach for now... */
10157 if (GCPtrMem & 31)
10158 return iemRaiseGeneralProtectionFault0(pVCpu);
10159
10160 PRTUINT256U pu256Dst;
10161 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10162 if (rc == VINF_SUCCESS)
10163 {
10164 pu256Dst->au64[0] = pu256Value->au64[0];
10165 pu256Dst->au64[1] = pu256Value->au64[1];
10166 pu256Dst->au64[2] = pu256Value->au64[2];
10167 pu256Dst->au64[3] = pu256Value->au64[3];
10168 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10169 }
10170 return rc;
10171}
10172
10173
10174#ifdef IEM_WITH_SETJMP
10175/**
10176 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10177 *
10179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10180 * @param iSegReg The index of the segment register to use for
10181 * this access. The base and limits are checked.
10182 * @param GCPtrMem The address of the guest memory.
10183 * @param pu256Value Pointer to the value to store.
10184 */
10185DECL_NO_INLINE(IEM_STATIC, void)
10186iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10187{
10188 /* The lazy approach for now... */
10189 if ((GCPtrMem & 31) == 0)
10190 {
10191 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10192 pu256Dst->au64[0] = pu256Value->au64[0];
10193 pu256Dst->au64[1] = pu256Value->au64[1];
10194 pu256Dst->au64[2] = pu256Value->au64[2];
10195 pu256Dst->au64[3] = pu256Value->au64[3];
10196 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10197 return;
10198 }
10199
10200 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10201 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10202}
10203#endif
10204
10205
10206/**
10207 * Stores a descriptor register (sgdt, sidt).
10208 *
10209 * @returns Strict VBox status code.
10210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10211 * @param cbLimit The limit.
10212 * @param GCPtrBase The base address.
10213 * @param iSegReg The index of the segment register to use for
10214 * this access. The base and limits are checked.
10215 * @param GCPtrMem The address of the guest memory.
10216 */
10217IEM_STATIC VBOXSTRICTRC
10218iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10219{
10220 /*
10221 * The SIDT and SGDT instructions actually store the data using two
10222 * independent writes. The instructions do not respond to opsize prefixes.
10223 */
10224 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10225 if (rcStrict == VINF_SUCCESS)
10226 {
10227 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10228 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10229 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10230 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10231 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10232 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10233 else
10234 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10235 }
10236 return rcStrict;
10237}
10238
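/*
 * Hypothetical one-liner restating the 16-bit operand size quirk handled above:
 * on 286-class target CPUs the stored base dword gets its top byte forced to
 * 0xFF, while later CPUs store the full 32-bit base.
 */
static uint32_t sketchSgdtBaseDword16(uint32_t uBase, bool fIs286OrOlder)
{
    return fIs286OrOlder ? (uBase | UINT32_C(0xff000000)) : uBase;
}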
10239
10240/**
10241 * Pushes a word onto the stack.
10242 *
10243 * @returns Strict VBox status code.
10244 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10245 * @param u16Value The value to push.
10246 */
10247IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10248{
10249 /* Decrement the stack pointer. */
10250 uint64_t uNewRsp;
10251 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10252
10253 /* Write the word the lazy way. */
10254 uint16_t *pu16Dst;
10255 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10256 if (rc == VINF_SUCCESS)
10257 {
10258 *pu16Dst = u16Value;
10259 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10260 }
10261
10262 /* Commit the new RSP value unless an access handler made trouble. */
10263 if (rc == VINF_SUCCESS)
10264 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10265
10266 return rc;
10267}
10268
10269
10270/**
10271 * Pushes a dword onto the stack.
10272 *
10273 * @returns Strict VBox status code.
10274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10275 * @param u32Value The value to push.
10276 */
10277IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10278{
10279 /* Decrement the stack pointer. */
10280 uint64_t uNewRsp;
10281 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10282
10283 /* Write the dword the lazy way. */
10284 uint32_t *pu32Dst;
10285 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10286 if (rc == VINF_SUCCESS)
10287 {
10288 *pu32Dst = u32Value;
10289 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10290 }
10291
10292 /* Commit the new RSP value unless an access handler made trouble. */
10293 if (rc == VINF_SUCCESS)
10294 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10295
10296 return rc;
10297}
10298
10299
10300/**
10301 * Pushes a dword segment register value onto the stack.
10302 *
10303 * @returns Strict VBox status code.
10304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10305 * @param u32Value The value to push.
10306 */
10307IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10308{
10309 /* Decrement the stack pointer. */
10310 uint64_t uNewRsp;
10311 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10312
10313 /* The Intel docs talk about zero extending the selector register
10314 value. My actual Intel CPU here might be zero extending the value
10315 but it still only writes the lower word... */
10316 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10317 * happens when crossing an electric page boundary, is the high word checked
10318 * for write accessibility or not? Probably it is. What about segment limits?
10319 * It appears this behavior is also shared with trap error codes.
10320 *
10321 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10322 * ancient hardware when it actually did change. */
10323 uint16_t *pu16Dst;
10324 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10325 if (rc == VINF_SUCCESS)
10326 {
10327 *pu16Dst = (uint16_t)u32Value;
10328 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10329 }
10330
10331 /* Commit the new RSP value unless an access handler made trouble. */
10332 if (rc == VINF_SUCCESS)
10333 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10334
10335 return rc;
10336}
10337
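/*
 * Illustrative sketch (hypothetical helper) of the behaviour noted above when
 * pushing a segment register with a 32-bit operand size: a dword of stack is
 * consumed, but only the low word of the slot is actually written.
 */
static void sketchWriteSRegToDwordSlot(uint8_t *pbSlot /* 4-byte stack slot */, uint32_t u32Sel)
{
    pbSlot[0] = (uint8_t)u32Sel;
    pbSlot[1] = (uint8_t)(u32Sel >> 8);
    /* pbSlot[2] and pbSlot[3] are left untouched. */
}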
10338
10339/**
10340 * Pushes a qword onto the stack.
10341 *
10342 * @returns Strict VBox status code.
10343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10344 * @param u64Value The value to push.
10345 */
10346IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10347{
10348 /* Decrement the stack pointer. */
10349 uint64_t uNewRsp;
10350 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10351
10352 /* Write the qword the lazy way. */
10353 uint64_t *pu64Dst;
10354 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10355 if (rc == VINF_SUCCESS)
10356 {
10357 *pu64Dst = u64Value;
10358 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10359 }
10360
10361 /* Commit the new RSP value unless an access handler made trouble. */
10362 if (rc == VINF_SUCCESS)
10363 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10364
10365 return rc;
10366}
10367
10368
10369/**
10370 * Pops a word from the stack.
10371 *
10372 * @returns Strict VBox status code.
10373 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10374 * @param pu16Value Where to store the popped value.
10375 */
10376IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10377{
10378 /* Increment the stack pointer. */
10379 uint64_t uNewRsp;
10380 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10381
10382 /* Fetch the word the lazy way. */
10383 uint16_t const *pu16Src;
10384 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10385 if (rc == VINF_SUCCESS)
10386 {
10387 *pu16Value = *pu16Src;
10388 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10389
10390 /* Commit the new RSP value. */
10391 if (rc == VINF_SUCCESS)
10392 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10393 }
10394
10395 return rc;
10396}
10397
10398
10399/**
10400 * Pops a dword from the stack.
10401 *
10402 * @returns Strict VBox status code.
10403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10404 * @param pu32Value Where to store the popped value.
10405 */
10406IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10407{
10408 /* Increment the stack pointer. */
10409 uint64_t uNewRsp;
10410 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10411
10412 /* Fetch the dword the lazy way. */
10413 uint32_t const *pu32Src;
10414 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10415 if (rc == VINF_SUCCESS)
10416 {
10417 *pu32Value = *pu32Src;
10418 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10419
10420 /* Commit the new RSP value. */
10421 if (rc == VINF_SUCCESS)
10422 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10423 }
10424
10425 return rc;
10426}
10427
10428
10429/**
10430 * Pops a qword from the stack.
10431 *
10432 * @returns Strict VBox status code.
10433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10434 * @param pu64Value Where to store the popped value.
10435 */
10436IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10437{
10438 /* Increment the stack pointer. */
10439 uint64_t uNewRsp;
10440 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10441
10442 /* Fetch the qword the lazy way. */
10443 uint64_t const *pu64Src;
10444 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10445 if (rc == VINF_SUCCESS)
10446 {
10447 *pu64Value = *pu64Src;
10448 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10449
10450 /* Commit the new RSP value. */
10451 if (rc == VINF_SUCCESS)
10452 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10453 }
10454
10455 return rc;
10456}
10457
10458
10459/**
10460 * Pushes a word onto the stack, using a temporary stack pointer.
10461 *
10462 * @returns Strict VBox status code.
10463 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10464 * @param u16Value The value to push.
10465 * @param pTmpRsp Pointer to the temporary stack pointer.
10466 */
10467IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10468{
10469 /* Decrement the stack pointer. */
10470 RTUINT64U NewRsp = *pTmpRsp;
10471 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10472
10473 /* Write the word the lazy way. */
10474 uint16_t *pu16Dst;
10475 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10476 if (rc == VINF_SUCCESS)
10477 {
10478 *pu16Dst = u16Value;
10479 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10480 }
10481
10482 /* Commit the new RSP value unless an access handler made trouble. */
10483 if (rc == VINF_SUCCESS)
10484 *pTmpRsp = NewRsp;
10485
10486 return rc;
10487}
10488
10489
10490/**
10491 * Pushes a dword onto the stack, using a temporary stack pointer.
10492 *
10493 * @returns Strict VBox status code.
10494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10495 * @param u32Value The value to push.
10496 * @param pTmpRsp Pointer to the temporary stack pointer.
10497 */
10498IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10499{
10500 /* Decrement the stack pointer. */
10501 RTUINT64U NewRsp = *pTmpRsp;
10502 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10503
10504 /* Write the dword the lazy way. */
10505 uint32_t *pu32Dst;
10506 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10507 if (rc == VINF_SUCCESS)
10508 {
10509 *pu32Dst = u32Value;
10510 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10511 }
10512
10513 /* Commit the new RSP value unless an access handler made trouble. */
10514 if (rc == VINF_SUCCESS)
10515 *pTmpRsp = NewRsp;
10516
10517 return rc;
10518}
10519
10520
10521/**
10522 * Pushes a qword onto the stack, using a temporary stack pointer.
10523 *
10524 * @returns Strict VBox status code.
10525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10526 * @param u64Value The value to push.
10527 * @param pTmpRsp Pointer to the temporary stack pointer.
10528 */
10529IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10530{
10531 /* Decrement the stack pointer. */
10532 RTUINT64U NewRsp = *pTmpRsp;
10533 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10534
10535 /* Write the qword the lazy way. */
10536 uint64_t *pu64Dst;
10537 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10538 if (rc == VINF_SUCCESS)
10539 {
10540 *pu64Dst = u64Value;
10541 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10542 }
10543
10544 /* Commit the new RSP value unless an access handler made trouble. */
10545 if (rc == VINF_SUCCESS)
10546 *pTmpRsp = NewRsp;
10547
10548 return rc;
10549}
10550
10551
10552/**
10553 * Pops a word from the stack, using a temporary stack pointer.
10554 *
10555 * @returns Strict VBox status code.
10556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10557 * @param pu16Value Where to store the popped value.
10558 * @param pTmpRsp Pointer to the temporary stack pointer.
10559 */
10560IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10561{
10562 /* Increment the stack pointer. */
10563 RTUINT64U NewRsp = *pTmpRsp;
10564 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10565
10566 /* Fetch the word the lazy way. */
10567 uint16_t const *pu16Src;
10568 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10569 if (rc == VINF_SUCCESS)
10570 {
10571 *pu16Value = *pu16Src;
10572 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10573
10574 /* Commit the new RSP value. */
10575 if (rc == VINF_SUCCESS)
10576 *pTmpRsp = NewRsp;
10577 }
10578
10579 return rc;
10580}
10581
10582
10583/**
10584 * Pops a dword from the stack, using a temporary stack pointer.
10585 *
10586 * @returns Strict VBox status code.
10587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10588 * @param pu32Value Where to store the popped value.
10589 * @param pTmpRsp Pointer to the temporary stack pointer.
10590 */
10591IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10592{
10593 /* Increment the stack pointer. */
10594 RTUINT64U NewRsp = *pTmpRsp;
10595 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10596
10597 /* Fetch the dword the lazy way. */
10598 uint32_t const *pu32Src;
10599 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10600 if (rc == VINF_SUCCESS)
10601 {
10602 *pu32Value = *pu32Src;
10603 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10604
10605 /* Commit the new RSP value. */
10606 if (rc == VINF_SUCCESS)
10607 *pTmpRsp = NewRsp;
10608 }
10609
10610 return rc;
10611}
10612
10613
10614/**
10615 * Pops a qword from the stack, using a temporary stack pointer.
10616 *
10617 * @returns Strict VBox status code.
10618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10619 * @param pu64Value Where to store the popped value.
10620 * @param pTmpRsp Pointer to the temporary stack pointer.
10621 */
10622IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10623{
10624 /* Increment the stack pointer. */
10625 RTUINT64U NewRsp = *pTmpRsp;
10626 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10627
10628 /* Fetch the qword the lazy way. */
10629 uint64_t const *pu64Src;
10630 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10631 if (rcStrict == VINF_SUCCESS)
10632 {
10633 *pu64Value = *pu64Src;
10634 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10635
10636 /* Commit the new RSP value. */
10637 if (rcStrict == VINF_SUCCESS)
10638 *pTmpRsp = NewRsp;
10639 }
10640
10641 return rcStrict;
10642}
10643
10644
10645/**
10646 * Begin a special stack push (used by interrupt, exceptions and such).
10647 *
10648 * This will raise \#SS or \#PF if appropriate.
10649 *
10650 * @returns Strict VBox status code.
10651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10652 * @param cbMem The number of bytes to push onto the stack.
10653 * @param ppvMem Where to return the pointer to the stack memory.
10654 * As with the other memory functions this could be
10655 * direct access or bounce buffered access, so
10656 * don't commit the register until the commit call
10657 * succeeds.
10658 * @param puNewRsp Where to return the new RSP value. This must be
10659 * passed unchanged to
10660 * iemMemStackPushCommitSpecial().
10661 */
10662IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10663{
10664 Assert(cbMem < UINT8_MAX);
10665 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10666 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10667}
10668
10669
10670/**
10671 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10672 *
10673 * This will update the rSP.
10674 *
10675 * @returns Strict VBox status code.
10676 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10677 * @param pvMem The pointer returned by
10678 * iemMemStackPushBeginSpecial().
10679 * @param uNewRsp The new RSP value returned by
10680 * iemMemStackPushBeginSpecial().
10681 */
10682IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10683{
10684 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10685 if (rcStrict == VINF_SUCCESS)
10686 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10687 return rcStrict;
10688}
10689
10690
10691/**
10692 * Begin a special stack pop (used by iret, retf and such).
10693 *
10694 * This will raise \#SS or \#PF if appropriate.
10695 *
10696 * @returns Strict VBox status code.
10697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10698 * @param cbMem The number of bytes to pop from the stack.
10699 * @param ppvMem Where to return the pointer to the stack memory.
10700 * @param puNewRsp Where to return the new RSP value. This must be
10701 * assigned to CPUMCTX::rsp manually some time
10702 * after iemMemStackPopDoneSpecial() has been
10703 * called.
10704 */
10705IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10706{
10707 Assert(cbMem < UINT8_MAX);
10708 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10709 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10710}
10711
10712
10713/**
10714 * Continue a special stack pop (used by iret and retf).
10715 *
10716 * This will raise \#SS or \#PF if appropriate.
10717 *
10718 * @returns Strict VBox status code.
10719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10720 * @param cbMem The number of bytes to pop from the stack.
10721 * @param ppvMem Where to return the pointer to the stack memory.
10722 * @param puNewRsp Where to return the new RSP value. This must be
10723 * assigned to CPUMCTX::rsp manually some time
10724 * after iemMemStackPopDoneSpecial() has been
10725 * called.
10726 */
10727IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10728{
10729 Assert(cbMem < UINT8_MAX);
10730 RTUINT64U NewRsp;
10731 NewRsp.u = *puNewRsp;
10732 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10733 *puNewRsp = NewRsp.u;
10734 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10735}
10736
10737
10738/**
10739 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10740 * iemMemStackPopContinueSpecial).
10741 *
10742 * The caller will manually commit the rSP.
10743 *
10744 * @returns Strict VBox status code.
10745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10746 * @param pvMem The pointer returned by
10747 * iemMemStackPopBeginSpecial() or
10748 * iemMemStackPopContinueSpecial().
10749 */
10750IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10751{
10752 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10753}
10754
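/*
 * Typical pairing of the special stack pop helpers above, shown as a non-built
 * sketch (the function name is hypothetical; the helpers, types and the manual
 * RSP commit are the ones documented above):
 */
#if 0 /* illustrative sketch only */
IEM_STATIC VBOXSTRICTRC iemMemStackPopU64SpecialSketch(PVMCPU pVCpu, uint64_t *pu64Value)
{
    uint64_t    uNewRsp;
    void const *pvMem;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(uint64_t), &pvMem, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu64Value = *(uint64_t const *)pvMem;
        rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvMem);
        if (rcStrict == VINF_SUCCESS)
            pVCpu->cpum.GstCtx.rsp = uNewRsp; /* the caller commits RSP manually */
    }
    return rcStrict;
}
#endif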
10755
10756/**
10757 * Fetches a system table byte.
10758 *
10759 * @returns Strict VBox status code.
10760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10761 * @param pbDst Where to return the byte.
10762 * @param iSegReg The index of the segment register to use for
10763 * this access. The base and limits are checked.
10764 * @param GCPtrMem The address of the guest memory.
10765 */
10766IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10767{
10768 /* The lazy approach for now... */
10769 uint8_t const *pbSrc;
10770 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10771 if (rc == VINF_SUCCESS)
10772 {
10773 *pbDst = *pbSrc;
10774 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10775 }
10776 return rc;
10777}
10778
10779
10780/**
10781 * Fetches a system table word.
10782 *
10783 * @returns Strict VBox status code.
10784 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10785 * @param pu16Dst Where to return the word.
10786 * @param iSegReg The index of the segment register to use for
10787 * this access. The base and limits are checked.
10788 * @param GCPtrMem The address of the guest memory.
10789 */
10790IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10791{
10792 /* The lazy approach for now... */
10793 uint16_t const *pu16Src;
10794 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10795 if (rc == VINF_SUCCESS)
10796 {
10797 *pu16Dst = *pu16Src;
10798 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10799 }
10800 return rc;
10801}
10802
10803
10804/**
10805 * Fetches a system table dword.
10806 *
10807 * @returns Strict VBox status code.
10808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10809 * @param pu32Dst Where to return the dword.
10810 * @param iSegReg The index of the segment register to use for
10811 * this access. The base and limits are checked.
10812 * @param GCPtrMem The address of the guest memory.
10813 */
10814IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10815{
10816 /* The lazy approach for now... */
10817 uint32_t const *pu32Src;
10818 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10819 if (rc == VINF_SUCCESS)
10820 {
10821 *pu32Dst = *pu32Src;
10822 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10823 }
10824 return rc;
10825}
10826
10827
10828/**
10829 * Fetches a system table qword.
10830 *
10831 * @returns Strict VBox status code.
10832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10833 * @param pu64Dst Where to return the qword.
10834 * @param iSegReg The index of the segment register to use for
10835 * this access. The base and limits are checked.
10836 * @param GCPtrMem The address of the guest memory.
10837 */
10838IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10839{
10840 /* The lazy approach for now... */
10841 uint64_t const *pu64Src;
10842 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10843 if (rc == VINF_SUCCESS)
10844 {
10845 *pu64Dst = *pu64Src;
10846 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10847 }
10848 return rc;
10849}
10850
10851
10852/**
10853 * Fetches a descriptor table entry with caller specified error code.
10854 *
10855 * @returns Strict VBox status code.
10856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10857 * @param pDesc Where to return the descriptor table entry.
10858 * @param uSel The selector which table entry to fetch.
10859 * @param uXcpt The exception to raise on table lookup error.
10860 * @param uErrorCode The error code associated with the exception.
10861 */
10862IEM_STATIC VBOXSTRICTRC
10863iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10864{
10865 AssertPtr(pDesc);
10866 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10867
10868 /** @todo did the 286 require all 8 bytes to be accessible? */
10869 /*
10870 * Get the selector table base and check bounds.
10871 */
10872 RTGCPTR GCPtrBase;
10873 if (uSel & X86_SEL_LDT)
10874 {
10875 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10876 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10877 {
10878 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10879 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10880 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10881 uErrorCode, 0);
10882 }
10883
10884 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10885 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10886 }
10887 else
10888 {
10889 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10890 {
10891 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10892 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10893 uErrorCode, 0);
10894 }
10895 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10896 }
10897
10898 /*
10899 * Read the legacy descriptor and maybe the long mode extensions if
10900 * required.
10901 */
10902 VBOXSTRICTRC rcStrict;
10903 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10904 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10905 else
10906 {
10907 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10908 if (rcStrict == VINF_SUCCESS)
10909 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10910 if (rcStrict == VINF_SUCCESS)
10911 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10912 if (rcStrict == VINF_SUCCESS)
10913 pDesc->Legacy.au16[3] = 0;
10914 else
10915 return rcStrict;
10916 }
10917
10918 if (rcStrict == VINF_SUCCESS)
10919 {
10920 if ( !IEM_IS_LONG_MODE(pVCpu)
10921 || pDesc->Legacy.Gen.u1DescType)
10922 pDesc->Long.au64[1] = 0;
10923 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10924 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1); /* (uSel | 7) + 1 == (uSel & X86_SEL_MASK) + 8, i.e. the second half of the 16-byte long mode entry. */
10925 else
10926 {
10927 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10928 /** @todo is this the right exception? */
10929 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10930 }
10931 }
10932 return rcStrict;
10933}
10934
10935
10936/**
10937 * Fetches a descriptor table entry.
10938 *
10939 * @returns Strict VBox status code.
10940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10941 * @param pDesc Where to return the descriptor table entry.
10942 * @param uSel The selector which table entry to fetch.
10943 * @param uXcpt The exception to raise on table lookup error.
10944 */
10945IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10946{
10947 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10948}
10949
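/*
 * Typical caller flow (an illustrative sketch only; real segment loading code
 * performs many more checks between the fetch and the accessed-bit update):
 *
 *     IEMSELDESC Desc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     ...
 *     if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *     {
 *         rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
 *         if (rcStrict != VINF_SUCCESS)
 *             return rcStrict;
 *         Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
 *     }
 */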
10950
10951/**
10952 * Fakes a long mode stack selector for SS = 0.
10953 *
10954 * @param pDescSs Where to return the fake stack descriptor.
10955 * @param uDpl The DPL we want.
10956 */
10957IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10958{
10959 pDescSs->Long.au64[0] = 0;
10960 pDescSs->Long.au64[1] = 0;
10961 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10962 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10963 pDescSs->Long.Gen.u2Dpl = uDpl;
10964 pDescSs->Long.Gen.u1Present = 1;
10965 pDescSs->Long.Gen.u1Long = 1;
10966}
10967
10968
10969/**
10970 * Marks the selector descriptor as accessed (only non-system descriptors).
10971 *
10972 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10973 * will therefore skip the limit checks.
10974 *
10975 * @returns Strict VBox status code.
10976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10977 * @param uSel The selector.
10978 */
10979IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10980{
10981 /*
10982 * Get the selector table base and calculate the entry address.
10983 */
10984 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10985 ? pVCpu->cpum.GstCtx.ldtr.u64Base
10986 : pVCpu->cpum.GstCtx.gdtr.pGdt;
10987 GCPtr += uSel & X86_SEL_MASK;
10988
10989 /*
10990 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10991 * ugly stuff to avoid this. This will make sure it's an atomic access
10992 * as well as more or less remove any question about 8-bit or 32-bit accesses.
10993 */
10994 VBOXSTRICTRC rcStrict;
10995 uint32_t volatile *pu32;
10996 if ((GCPtr & 3) == 0)
10997 {
10998 /* The normal case, map the 32 bits containing the accessed bit (bit 40). */
10999 GCPtr += 2 + 2;
11000 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11001 if (rcStrict != VINF_SUCCESS)
11002 return rcStrict;
11003 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11004 }
11005 else
11006 {
11007 /* The misaligned GDT/LDT case, map the whole thing. */
11008 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11009 if (rcStrict != VINF_SUCCESS)
11010 return rcStrict;
11011 switch ((uintptr_t)pu32 & 3)
11012 {
11013 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11014 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11015 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11016 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11017 }
11018 }
11019
11020 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11021}
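
/*
 * Worked example for the misaligned case above (this relies on iemMemMap
 * preserving the low address bits of GCPtr in the returned pointer): the
 * accessed flag is bit 40 of the descriptor, i.e. bit 0 of byte 5.  If the
 * descriptor starts at an address ending in ...01b, then pu32 + 3 is the
 * nearest dword aligned address, and ASMAtomicBitSet() is given bit
 * 40 - 24 = 16, which lands on byte (3 + 16/8) = 5, bit 0 of the descriptor,
 * exactly as in the aligned case.
 */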
11022
11023/** @} */
11024
11025
11026/*
11027 * Include the C/C++ implementation of instruction.
11028 */
11029#include "IEMAllCImpl.cpp.h"
11030
11031
11032
11033/** @name "Microcode" macros.
11034 *
11035 * The idea is that we should be able to use the same code to interpret
11036 * instructions as well as to recompile them, hence this obfuscation.
11037 *
11038 * @{
11039 */
11040#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11041#define IEM_MC_END() }
11042#define IEM_MC_PAUSE() do {} while (0)
11043#define IEM_MC_CONTINUE() do {} while (0)
11044
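/*
 * Illustrative sketch of how the decoder code composes these macros into an
 * instruction body (the register indices iGRegDst / iGRegSrc are placeholders
 * that the decoder would supply, not macros defined here):
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, iGRegSrc);
 *     IEM_MC_STORE_GREG_U16(iGRegDst, u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *
 * Macros that can fail either return a VBOXSTRICTRC from the enclosing
 * function (via IEM_MC_RETURN_ON_FAILURE below) or, in the setjmp build,
 * longjmp out, so the block reads as straight-line "microcode" and could in
 * principle also be fed to a recompiler, which is the point of the
 * obfuscation noted above.
 */
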
11045/** Internal macro. */
11046#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11047 do \
11048 { \
11049 VBOXSTRICTRC rcStrict2 = a_Expr; \
11050 if (rcStrict2 != VINF_SUCCESS) \
11051 return rcStrict2; \
11052 } while (0)
11053
11054
11055#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11056#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11057#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11058#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11059#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11060#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11061#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11062#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11063#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11064 do { \
11065 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11066 return iemRaiseDeviceNotAvailable(pVCpu); \
11067 } while (0)
11068#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11069 do { \
11070 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11071 return iemRaiseDeviceNotAvailable(pVCpu); \
11072 } while (0)
11073#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11074 do { \
11075 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11076 return iemRaiseMathFault(pVCpu); \
11077 } while (0)
11078#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11079 do { \
11080 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11081 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11082 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11083 return iemRaiseUndefinedOpcode(pVCpu); \
11084 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11085 return iemRaiseDeviceNotAvailable(pVCpu); \
11086 } while (0)
11087#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11088 do { \
11089 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11090 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11091 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11092 return iemRaiseUndefinedOpcode(pVCpu); \
11093 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11094 return iemRaiseDeviceNotAvailable(pVCpu); \
11095 } while (0)
11096#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11097 do { \
11098 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11099 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11100 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11101 return iemRaiseUndefinedOpcode(pVCpu); \
11102 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11103 return iemRaiseDeviceNotAvailable(pVCpu); \
11104 } while (0)
11105#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11106 do { \
11107 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11108 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11109 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11110 return iemRaiseUndefinedOpcode(pVCpu); \
11111 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11112 return iemRaiseDeviceNotAvailable(pVCpu); \
11113 } while (0)
11114#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11115 do { \
11116 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11117 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11118 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11119 return iemRaiseUndefinedOpcode(pVCpu); \
11120 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11121 return iemRaiseDeviceNotAvailable(pVCpu); \
11122 } while (0)
11123#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11124 do { \
11125 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11126 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11127 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11128 return iemRaiseUndefinedOpcode(pVCpu); \
11129 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11130 return iemRaiseDeviceNotAvailable(pVCpu); \
11131 } while (0)
11132#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11133 do { \
11134 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11135 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11136 return iemRaiseUndefinedOpcode(pVCpu); \
11137 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11138 return iemRaiseDeviceNotAvailable(pVCpu); \
11139 } while (0)
11140#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11141 do { \
11142 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11143 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11144 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11145 return iemRaiseUndefinedOpcode(pVCpu); \
11146 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11147 return iemRaiseDeviceNotAvailable(pVCpu); \
11148 } while (0)
11149#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11150 do { \
11151 if (pVCpu->iem.s.uCpl != 0) \
11152 return iemRaiseGeneralProtectionFault0(pVCpu); \
11153 } while (0)
11154#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11155 do { \
11156 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11157 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11158 } while (0)
11159#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11160 do { \
11161 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11162 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11163 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11164 return iemRaiseUndefinedOpcode(pVCpu); \
11165 } while (0)
11166#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11167 do { \
11168 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11169 return iemRaiseGeneralProtectionFault0(pVCpu); \
11170 } while (0)
11171
11172
11173#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11174#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11175#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11176#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11177#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11178#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11179#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11180 uint32_t a_Name; \
11181 uint32_t *a_pName = &a_Name
11182#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11183 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11184
11185#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11186#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11187
11188#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11189#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11190#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11191#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11192#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11193#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11194#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11195#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11196#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11197#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11198#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11199#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11200#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11201#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11202#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11203#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11204#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11205#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11206 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11207 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11208 } while (0)
11209#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11210 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11211 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11212 } while (0)
11213#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11214 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11215 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11216 } while (0)
11217/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11218#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11219 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11220 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11221 } while (0)
11222#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11223 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11224 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11225 } while (0)
11226/** @note Not for IOPL or IF testing or modification. */
11227#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11228#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11229#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11230#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11231
11232#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11233#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11234#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11235#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11236#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11237#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11238#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11239#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11240#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11241#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11242/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11243#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11244 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11245 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11246 } while (0)
11247#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11248 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11249 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11250 } while (0)
11251#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11252 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11253
11254
11255#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11256#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11257/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11258 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11259#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11260#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11261/** @note Not for IOPL or IF testing or modification. */
11262#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11263
11264#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11265#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11266#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11267 do { \
11268 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11269 *pu32Reg += (a_u32Value); \
11270 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11271 } while (0)
11272#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11273
11274#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11275#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11276#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11277 do { \
11278 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11279 *pu32Reg -= (a_u32Value); \
11280 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11281 } while (0)
11282#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11283#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11284
11285#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11286#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11287#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11288#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11289#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11290#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11291#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11292
11293#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11294#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11295#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11296#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11297
11298#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11299#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11300#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11301
11302#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11303#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11304#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11305
11306#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11307#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11308#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11309
11310#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11311#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11312#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11313
11314#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11315
11316#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11317
11318#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11319#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11320#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11321 do { \
11322 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11323 *pu32Reg &= (a_u32Value); \
11324 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11325 } while (0)
11326#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11327
11328#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11329#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11330#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11331 do { \
11332 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11333 *pu32Reg |= (a_u32Value); \
11334 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11335 } while (0)
11336#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11337
11338
11339/** @note Not for IOPL or IF modification. */
11340#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11341/** @note Not for IOPL or IF modification. */
11342#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11343/** @note Not for IOPL or IF modification. */
11344#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11345
11346#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11347
11348/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11349#define IEM_MC_FPU_TO_MMX_MODE() do { \
11350 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11351 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11352 } while (0)
11353
11354/** Switches the FPU state from MMX mode (FTW=0xffff). */
11355#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11356 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11357 } while (0)
11358
11359#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11360 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11361#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11362 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11363#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11364 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11365 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11366 } while (0)
11367#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11368 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11369 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11370 } while (0)
11371#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11372 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11373#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11374 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11375#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11376 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11377
11378#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11379 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11380 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11381 } while (0)
11382#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11383 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11384#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11385 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11386#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11387 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11388#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11389 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11390 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11391 } while (0)
11392#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11393 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11394#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11395 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11396 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11397 } while (0)
11398#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11399 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11400#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11401 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11402 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11403 } while (0)
11404#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11405 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11406#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11407 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11408#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11409 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11410#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11411 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11412#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11413 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11414 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11415 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11416 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11417 } while (0)
11418
11419#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11420 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11421 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11422 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11423 } while (0)
11424#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11425 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11426 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11427 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11428 } while (0)
11429#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11430 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11431 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11432 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11433 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11434 } while (0)
11435#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11436 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11437 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11438 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11439 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11440 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11441 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11442 } while (0)
11443
11444#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11445#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11446 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11447 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11448 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11449 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11450 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11451 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11452 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11453 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11454 } while (0)
11455#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11456 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11457 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11458 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11459 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11460 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11461 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11462 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11463 } while (0)
11464#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11465 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11466 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11467 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11468 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11469 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11470 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11471 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11472 } while (0)
11473#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11474 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11475 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11476 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11477 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11478 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11479 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11480 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11481 } while (0)
11482
11483#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11484 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11485#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11486 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11487#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11488 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11489#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11490 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11491 uintptr_t const iYRegTmp = (a_iYReg); \
11492 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11493 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11494 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11495 } while (0)
11496
11497#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11498 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11499 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11500 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11501 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11502 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11503 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11504 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11505 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11506 } while (0)
11507#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11508 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11509 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11510 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11511 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11512 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11513 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11514 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11515 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11516 } while (0)
11517#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11518 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11519 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11520 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11521 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11522 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11523 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11524 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11525 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11526 } while (0)
11527
11528#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11529 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11530 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11531 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11532 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11533 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11534 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11535 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11536 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11537 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11538 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11539 } while (0)
11540#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11541 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11542 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11543 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11544 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11545 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11546 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11547 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11548 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11549 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11550 } while (0)
11551#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11552 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11553 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11554 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11555 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11556 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11557 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11558 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11559 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11560 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11561 } while (0)
11562#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11563 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11564 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11565 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11566 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11567 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11568 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11569 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11570 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11571 } while (0)
11572
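/*
 * Note: the _ZX_VLMAX variants above model the VEX zero extension rule, where
 * a VEX encoded write to an XMM/YMM register clears everything above the
 * written width up to the maximum supported vector length; that is why the
 * upper YMM halves are zeroed and IEM_MC_INT_CLEAR_ZMM_256_UP() is invoked
 * for the (future) ZMM portion.
 */
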
11573#ifndef IEM_WITH_SETJMP
11574# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11575 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11576# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11577 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11578# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11579 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11580#else
11581# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11582 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11583# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11584 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11585# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11586 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11587#endif
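
/*
 * The two variants above (and in the groups that follow) differ only in how a
 * failed access propagates: without IEM_WITH_SETJMP the fetch helper returns
 * a VBOXSTRICTRC which IEM_MC_RETURN_ON_FAILURE() turns into an early return,
 * while the *Jmp helpers longjmp out on failure so the macro collapses into a
 * plain assignment.  Illustrative expansion of
 * IEM_MC_FETCH_MEM_U8(u8Value, X86_SREG_DS, GCPtrEffSrc) in the status code
 * build (u8Value and GCPtrEffSrc being whatever locals the caller declared):
 *
 *     do
 *     {
 *         VBOXSTRICTRC rcStrict2 = iemMemFetchDataU8(pVCpu, &u8Value, X86_SREG_DS, GCPtrEffSrc);
 *         if (rcStrict2 != VINF_SUCCESS)
 *             return rcStrict2;
 *     } while (0);
 */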
11588
11589#ifndef IEM_WITH_SETJMP
11590# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11591 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11592# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11593 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11594# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11595 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11596#else
11597# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11598 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11599# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11600 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11601# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11602 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11603#endif
11604
11605#ifndef IEM_WITH_SETJMP
11606# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11607 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11608# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11609 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11610# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11611 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11612#else
11613# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11614 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11615# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11616 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11617# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11618 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11619#endif
11620
11621#ifdef SOME_UNUSED_FUNCTION
11622# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11623 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11624#endif
11625
11626#ifndef IEM_WITH_SETJMP
11627# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11628 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11629# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11630 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11631# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11632 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11633# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11634 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11635#else
11636# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11637 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11638# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11639 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11640# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11641 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11642# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11643 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11644#endif
11645
11646#ifndef IEM_WITH_SETJMP
11647# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11648 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11649# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11650 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11651# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11652 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11653#else
11654# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11655 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11656# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11657 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11658# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11659 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11660#endif
11661
11662#ifndef IEM_WITH_SETJMP
11663# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11664 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11665# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11666 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11667#else
11668# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11669 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11670# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11671 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11672#endif
11673
11674#ifndef IEM_WITH_SETJMP
11675# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11676 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11677# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11678 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11679#else
11680# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11681 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11682# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11683 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11684#endif
11685
11686
11687
11688#ifndef IEM_WITH_SETJMP
11689# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11690 do { \
11691 uint8_t u8Tmp; \
11692 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11693 (a_u16Dst) = u8Tmp; \
11694 } while (0)
11695# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11696 do { \
11697 uint8_t u8Tmp; \
11698 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11699 (a_u32Dst) = u8Tmp; \
11700 } while (0)
11701# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11702 do { \
11703 uint8_t u8Tmp; \
11704 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11705 (a_u64Dst) = u8Tmp; \
11706 } while (0)
11707# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11708 do { \
11709 uint16_t u16Tmp; \
11710 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11711 (a_u32Dst) = u16Tmp; \
11712 } while (0)
11713# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11714 do { \
11715 uint16_t u16Tmp; \
11716 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11717 (a_u64Dst) = u16Tmp; \
11718 } while (0)
11719# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11720 do { \
11721 uint32_t u32Tmp; \
11722 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11723 (a_u64Dst) = u32Tmp; \
11724 } while (0)
11725#else /* IEM_WITH_SETJMP */
11726# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11727 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11728# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11729 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11730# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11731 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11732# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11733 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11734# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11735 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11736# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11737 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11738#endif /* IEM_WITH_SETJMP */
11739
11740#ifndef IEM_WITH_SETJMP
11741# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11742 do { \
11743 uint8_t u8Tmp; \
11744 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11745 (a_u16Dst) = (int8_t)u8Tmp; \
11746 } while (0)
11747# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11748 do { \
11749 uint8_t u8Tmp; \
11750 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11751 (a_u32Dst) = (int8_t)u8Tmp; \
11752 } while (0)
11753# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11754 do { \
11755 uint8_t u8Tmp; \
11756 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11757 (a_u64Dst) = (int8_t)u8Tmp; \
11758 } while (0)
11759# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11760 do { \
11761 uint16_t u16Tmp; \
11762 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11763 (a_u32Dst) = (int16_t)u16Tmp; \
11764 } while (0)
11765# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11766 do { \
11767 uint16_t u16Tmp; \
11768 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11769 (a_u64Dst) = (int16_t)u16Tmp; \
11770 } while (0)
11771# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11772 do { \
11773 uint32_t u32Tmp; \
11774 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11775 (a_u64Dst) = (int32_t)u32Tmp; \
11776 } while (0)
11777#else /* IEM_WITH_SETJMP */
11778# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11779 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11780# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11781 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11782# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11783 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11784# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11785 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11786# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11787 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11788# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11789 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11790#endif /* IEM_WITH_SETJMP */
11791
11792#ifndef IEM_WITH_SETJMP
11793# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11794 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11795# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11796 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11797# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11798 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11799# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11800 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11801#else
11802# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11803 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11804# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11805 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11806# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11807 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11808# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11809 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11810#endif
11811
11812#ifndef IEM_WITH_SETJMP
11813# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11814 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11815# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11816 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11817# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11818 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11819# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11820 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11821#else
11822# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11823 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11824# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11825 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11826# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11827 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11828# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11829 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11830#endif
11831
11832#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11833#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11834#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11835#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11836#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11837#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11838#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11839 do { \
11840 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11841 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11842 } while (0)
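
/*
 * The constants above are the bit patterns of a negative quiet NaN in single,
 * double and extended precision.  A standalone sketch (local names only) that
 * reinterprets the 32-bit and 64-bit patterns to confirm they really are NaNs:
 *
 *   #include <math.h>
 *   #include <stdint.h>
 *   #include <stdio.h>
 *   #include <string.h>
 *
 *   int main(void)
 *   {
 *       uint32_t u32 = UINT32_C(0xffc00000);          // sign=1, exponent all ones, quiet bit set
 *       uint64_t u64 = UINT64_C(0xfff8000000000000);  // the same shape in double precision
 *       float    r32;
 *       double   r64;
 *       memcpy(&r32, &u32, sizeof(r32));
 *       memcpy(&r64, &u64, sizeof(r64));
 *       printf("r32 NaN: %d, r64 NaN: %d\n", isnan(r32), isnan(r64));
 *       return 0;
 *   }
 */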
11843
11844#ifndef IEM_WITH_SETJMP
11845# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11846 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11847# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11848 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11849#else
11850# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11851 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11852# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11853 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11854#endif
11855
11856#ifndef IEM_WITH_SETJMP
11857# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11858 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11859# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11860 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11861#else
11862# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11863 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11864# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11865 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11866#endif
11867
11868
11869#define IEM_MC_PUSH_U16(a_u16Value) \
11870 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11871#define IEM_MC_PUSH_U32(a_u32Value) \
11872 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11873#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11874 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11875#define IEM_MC_PUSH_U64(a_u64Value) \
11876 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11877
11878#define IEM_MC_POP_U16(a_pu16Value) \
11879 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11880#define IEM_MC_POP_U32(a_pu32Value) \
11881 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11882#define IEM_MC_POP_U64(a_pu64Value) \
11883 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11884
11885/** Maps guest memory for direct or bounce buffered access.
11886 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11887 * @remarks May return.
11888 */
11889#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11890 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11891
11892/** Maps guest memory for direct or bounce buffered access.
11893 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11894 * @remarks May return.
11895 */
11896#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11897 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11898
11899/** Commits the memory and unmaps the guest memory.
11900 * @remarks May return.
11901 */
11902#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11903 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11904
11905/** Commits the memory and unmaps the guest memory unless the FPU status word
11906 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11907 * that would prevent the FPU store from taking place.
11908 *
11909 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11910 * store, while \#P will not.
11911 *
11912 * @remarks May in theory return - for now.
11913 */
11914#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11915 do { \
11916 if ( !(a_u16FSW & X86_FSW_ES) \
11917 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11918 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11919 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11920 } while (0)
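
/*
 * The macro above only skips the commit when the FSW exception summary bit is
 * set and at least one of the pending \#U, \#O or \#I exceptions is unmasked in
 * the FCW; the status bits IE/OE/UE line up with the mask bits IM/OM/UM, which
 * is what makes the single AND-NOT work.  A standalone sketch of the same
 * predicate with local MY_* stand-ins for the X86_FSW_* / X86_FCW_* constants:
 *
 *   #include <stdbool.h>
 *   #include <stdint.h>
 *
 *   #define MY_FSW_ES       UINT16_C(0x0080)   // exception summary
 *   #define MY_FSW_IE       UINT16_C(0x0001)
 *   #define MY_FSW_OE       UINT16_C(0x0008)
 *   #define MY_FSW_UE       UINT16_C(0x0010)
 *   #define MY_FCW_MASK_ALL UINT16_C(0x003f)
 *
 *   // Returns true when the FPU store may be committed.
 *   static bool mayCommitFpuStore(uint16_t u16Fsw, uint16_t u16Fcw)
 *   {
 *       if (!(u16Fsw & MY_FSW_ES))
 *           return true;                       // nothing pending at all
 *       return !(  (u16Fsw & (MY_FSW_UE | MY_FSW_OE | MY_FSW_IE))
 *                & ~(u16Fcw & MY_FCW_MASK_ALL));
 *   }
 */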
11921
11922/** Calculate efficient address from R/M. */
11923#ifndef IEM_WITH_SETJMP
11924# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11925 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11926#else
11927# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11928 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11929#endif
11930
11931#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11932#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11933#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11934#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11935#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11936#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11937#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11938
11939/**
11940 * Defers the rest of the instruction emulation to a C implementation routine
11941 * and returns, only taking the standard parameters.
11942 *
11943 * @param a_pfnCImpl The pointer to the C routine.
11944 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11945 */
11946#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11947
11948/**
11949 * Defers the rest of instruction emulation to a C implementation routine and
11950 * returns, taking one argument in addition to the standard ones.
11951 *
11952 * @param a_pfnCImpl The pointer to the C routine.
11953 * @param a0 The argument.
11954 */
11955#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11956
11957/**
11958 * Defers the rest of the instruction emulation to a C implementation routine
11959 * and returns, taking two arguments in addition to the standard ones.
11960 *
11961 * @param a_pfnCImpl The pointer to the C routine.
11962 * @param a0 The first extra argument.
11963 * @param a1 The second extra argument.
11964 */
11965#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11966
11967/**
11968 * Defers the rest of the instruction emulation to a C implementation routine
11969 * and returns, taking three arguments in addition to the standard ones.
11970 *
11971 * @param a_pfnCImpl The pointer to the C routine.
11972 * @param a0 The first extra argument.
11973 * @param a1 The second extra argument.
11974 * @param a2 The third extra argument.
11975 */
11976#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11977
11978/**
11979 * Defers the rest of the instruction emulation to a C implementation routine
11980 * and returns, taking four arguments in addition to the standard ones.
11981 *
11982 * @param a_pfnCImpl The pointer to the C routine.
11983 * @param a0 The first extra argument.
11984 * @param a1 The second extra argument.
11985 * @param a2 The third extra argument.
11986 * @param a3 The fourth extra argument.
11987 */
11988#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11989
11990/**
11991 * Defers the rest of the instruction emulation to a C implementation routine
11992 * and returns, taking five arguments in addition to the standard ones.
11993 *
11994 * @param a_pfnCImpl The pointer to the C routine.
11995 * @param a0 The first extra argument.
11996 * @param a1 The second extra argument.
11997 * @param a2 The third extra argument.
11998 * @param a3 The fourth extra argument.
11999 * @param a4 The fifth extra argument.
12000 */
12001#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12002
12003/**
12004 * Defers the entire instruction emulation to a C implementation routine and
12005 * returns, only taking the standard parameters.
12006 *
12007 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12008 *
12009 * @param a_pfnCImpl The pointer to the C routine.
12010 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12011 */
12012#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12013
12014/**
12015 * Defers the entire instruction emulation to a C implementation routine and
12016 * returns, taking one argument in addition to the standard ones.
12017 *
12018 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12019 *
12020 * @param a_pfnCImpl The pointer to the C routine.
12021 * @param a0 The argument.
12022 */
12023#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12024
12025/**
12026 * Defers the entire instruction emulation to a C implementation routine and
12027 * returns, taking two arguments in addition to the standard ones.
12028 *
12029 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12030 *
12031 * @param a_pfnCImpl The pointer to the C routine.
12032 * @param a0 The first extra argument.
12033 * @param a1 The second extra argument.
12034 */
12035#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12036
12037/**
12038 * Defers the entire instruction emulation to a C implementation routine and
12039 * returns, taking three arguments in addition to the standard ones.
12040 *
12041 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12042 *
12043 * @param a_pfnCImpl The pointer to the C routine.
12044 * @param a0 The first extra argument.
12045 * @param a1 The second extra argument.
12046 * @param a2 The third extra argument.
12047 */
12048#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
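
/*
 * All of the CALL/DEFER CIMPL macros above hand the remaining (or entire)
 * instruction emulation to a C worker whose first two parameters are the vCPU
 * and the instruction length.  A hedged, generic sketch of that calling shape
 * with hypothetical types and names (not the real IEM_CIMPL_DEF_* machinery):
 *
 *   #include <stdint.h>
 *
 *   typedef struct MYVCPU { uint64_t rip; } MYVCPU;
 *   typedef int MYSTRICTRC;
 *
 *   // Worker: vCPU and instruction length first, any extra operands after.
 *   static MYSTRICTRC myCImplWorker(MYVCPU *pVCpu, uint8_t cbInstr, uint16_t uOperand)
 *   {
 *       pVCpu->rip += cbInstr;   // a real worker would advance RIP itself, among other things
 *       (void)uOperand;
 *       return 0;
 *   }
 *
 *   // Decoder side: simply tail-call the worker and return its status.
 *   static MYSTRICTRC myDecodeExample(MYVCPU *pVCpu, uint8_t cbInstr)
 *   {
 *       return myCImplWorker(pVCpu, cbInstr, 0x1234);
 *   }
 */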
12049
12050/**
12051 * Calls a FPU assembly implementation taking one visible argument.
12052 *
12053 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12054 * @param a0 The first extra argument.
12055 */
12056#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12057 do { \
12058 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12059 } while (0)
12060
12061/**
12062 * Calls a FPU assembly implementation taking two visible arguments.
12063 *
12064 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12065 * @param a0 The first extra argument.
12066 * @param a1 The second extra argument.
12067 */
12068#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12069 do { \
12070 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12071 } while (0)
12072
12073/**
12074 * Calls a FPU assembly implementation taking three visible arguments.
12075 *
12076 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12077 * @param a0 The first extra argument.
12078 * @param a1 The second extra argument.
12079 * @param a2 The third extra argument.
12080 */
12081#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12082 do { \
12083 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12084 } while (0)
12085
12086#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12087 do { \
12088 (a_FpuData).FSW = (a_FSW); \
12089 (a_FpuData).r80Result = *(a_pr80Value); \
12090 } while (0)
12091
12092/** Pushes FPU result onto the stack. */
12093#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12094 iemFpuPushResult(pVCpu, &a_FpuData)
12095/** Pushes FPU result onto the stack and sets the FPUDP. */
12096#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12097 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12098
12099/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
12100#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12101 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12102
12103/** Stores FPU result in a stack register. */
12104#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12105 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12106/** Stores FPU result in a stack register and pops the stack. */
12107#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12108 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12109/** Stores FPU result in a stack register and sets the FPUDP. */
12110#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12111 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12112/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12113 * stack. */
12114#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12115 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12116
12117/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12118#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12119 iemFpuUpdateOpcodeAndIp(pVCpu)
12120/** Free a stack register (for FFREE and FFREEP). */
12121#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12122 iemFpuStackFree(pVCpu, a_iStReg)
12123/** Increment the FPU stack pointer. */
12124#define IEM_MC_FPU_STACK_INC_TOP() \
12125 iemFpuStackIncTop(pVCpu)
12126/** Decrement the FPU stack pointer. */
12127#define IEM_MC_FPU_STACK_DEC_TOP() \
12128 iemFpuStackDecTop(pVCpu)
12129
12130/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12131#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12132 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12133/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12134#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12135 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12136/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12137#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12138 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12139/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12140#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12141 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12142/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12143 * stack. */
12144#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12145 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12146/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12147#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12148 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12149
12150/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12151#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12152 iemFpuStackUnderflow(pVCpu, a_iStDst)
12153/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12154 * stack. */
12155#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12156 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12157/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12158 * FPUDS. */
12159#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12160 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12161/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12162 * FPUDS. Pops stack. */
12163#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12164 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12165/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12166 * stack twice. */
12167#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12168 iemFpuStackUnderflowThenPopPop(pVCpu)
12169/** Raises a FPU stack underflow exception for an instruction pushing a result
12170 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12171#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12172 iemFpuStackPushUnderflow(pVCpu)
12173/** Raises a FPU stack underflow exception for an instruction pushing a result
12174 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12175#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12176 iemFpuStackPushUnderflowTwo(pVCpu)
12177
12178/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12179 * FPUIP, FPUCS and FOP. */
12180#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12181 iemFpuStackPushOverflow(pVCpu)
12182/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12183 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12184#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12185 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12186/** Prepares for using the FPU state.
12187 * Ensures that we can use the host FPU in the current context (RC+R0).
12188 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12189#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12190/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12191#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12192/** Actualizes the guest FPU state so it can be accessed and modified. */
12193#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12194
12195/** Prepares for using the SSE state.
12196 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12197 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12198#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12199/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12200#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12201/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12202#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12203
12204/** Prepares for using the AVX state.
12205 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12206 * Ensures the guest AVX state in the CPUMCTX is up to date.
12207 * @note This will include the AVX512 state too when support for it is added
12208 *       due to the zero-extending feature of VEX instructions. */
12209#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12210/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12211#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12212/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12213#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12214
12215/**
12216 * Calls a MMX assembly implementation taking two visible arguments.
12217 *
12218 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12219 * @param a0 The first extra argument.
12220 * @param a1 The second extra argument.
12221 */
12222#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12223 do { \
12224 IEM_MC_PREPARE_FPU_USAGE(); \
12225 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12226 } while (0)
12227
12228/**
12229 * Calls a MMX assembly implementation taking three visible arguments.
12230 *
12231 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12232 * @param a0 The first extra argument.
12233 * @param a1 The second extra argument.
12234 * @param a2 The third extra argument.
12235 */
12236#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12237 do { \
12238 IEM_MC_PREPARE_FPU_USAGE(); \
12239 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12240 } while (0)
12241
12242
12243/**
12244 * Calls a SSE assembly implementation taking two visible arguments.
12245 *
12246 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12247 * @param a0 The first extra argument.
12248 * @param a1 The second extra argument.
12249 */
12250#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12251 do { \
12252 IEM_MC_PREPARE_SSE_USAGE(); \
12253 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12254 } while (0)
12255
12256/**
12257 * Calls a SSE assembly implementation taking three visible arguments.
12258 *
12259 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12260 * @param a0 The first extra argument.
12261 * @param a1 The second extra argument.
12262 * @param a2 The third extra argument.
12263 */
12264#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12265 do { \
12266 IEM_MC_PREPARE_SSE_USAGE(); \
12267 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12268 } while (0)
12269
12270
12271/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12272 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12273#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12274 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12275
12276/**
12277 * Calls a AVX assembly implementation taking two visible arguments.
12278 *
12279 * There is one implicit zero'th argument, a pointer to the extended state.
12280 *
12281 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12282 * @param a1 The first extra argument.
12283 * @param a2 The second extra argument.
12284 */
12285#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12286 do { \
12287 IEM_MC_PREPARE_AVX_USAGE(); \
12288 a_pfnAImpl(pXState, (a1), (a2)); \
12289 } while (0)
12290
12291/**
12292 * Calls a AVX assembly implementation taking three visible arguments.
12293 *
12294 * There is one implicit zero'th argument, a pointer to the extended state.
12295 *
12296 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12297 * @param a1 The first extra argument.
12298 * @param a2 The second extra argument.
12299 * @param a3 The third extra argument.
12300 */
12301#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12302 do { \
12303 IEM_MC_PREPARE_AVX_USAGE(); \
12304 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12305 } while (0)
12306
12307/** @note Not for IOPL or IF testing. */
12308#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12309/** @note Not for IOPL or IF testing. */
12310#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12311/** @note Not for IOPL or IF testing. */
12312#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12313/** @note Not for IOPL or IF testing. */
12314#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12315/** @note Not for IOPL or IF testing. */
12316#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12317 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12318 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12319/** @note Not for IOPL or IF testing. */
12320#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12321 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12322 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12323/** @note Not for IOPL or IF testing. */
12324#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12325 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12326 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12327 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12328/** @note Not for IOPL or IF testing. */
12329#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12330 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12331 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12332 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12333#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12334#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12335#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12336/** @note Not for IOPL or IF testing. */
12337#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12338 if ( pVCpu->cpum.GstCtx.cx != 0 \
12339 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12340/** @note Not for IOPL or IF testing. */
12341#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12342 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12343 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12344/** @note Not for IOPL or IF testing. */
12345#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12346 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12347 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12348/** @note Not for IOPL or IF testing. */
12349#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12350 if ( pVCpu->cpum.GstCtx.cx != 0 \
12351 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12352/** @note Not for IOPL or IF testing. */
12353#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12354 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12355 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12356/** @note Not for IOPL or IF testing. */
12357#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12358 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12359 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12360#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12361#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12362
12363#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12364 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12365#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12366 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12367#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12368 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12369#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12370 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12371#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12372 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12373#define IEM_MC_IF_FCW_IM() \
12374 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12375
12376#define IEM_MC_ELSE() } else {
12377#define IEM_MC_ENDIF() } do {} while (0)
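
/*
 * The IEM_MC_IF_* macros open a brace that IEM_MC_ELSE / IEM_MC_ENDIF close
 * again, so a microcode block reads like structured C.  A tiny standalone
 * sketch of the same open/close-brace trick with hypothetical MY_* names:
 *
 *   #include <stdio.h>
 *
 *   #define MY_IF_NONZERO(a_uValue)  if ((a_uValue) != 0) {
 *   #define MY_ELSE()                } else {
 *   #define MY_ENDIF()               } do {} while (0)
 *
 *   int main(void)
 *   {
 *       unsigned uCounter = 0;
 *       MY_IF_NONZERO(uCounter)
 *           printf("non-zero\n");
 *       MY_ELSE()
 *           printf("zero\n");
 *       MY_ENDIF();
 *       return 0;
 *   }
 */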
12378
12379/** @} */
12380
12381
12382/** @name Opcode Debug Helpers.
12383 * @{
12384 */
12385#ifdef VBOX_WITH_STATISTICS
12386# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12387#else
12388# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12389#endif
12390
12391#ifdef DEBUG
12392# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12393 do { \
12394 IEMOP_INC_STATS(a_Stats); \
12395 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12396 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12397 } while (0)
12398
12399# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12400 do { \
12401 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12402 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12403 (void)RT_CONCAT(OP_,a_Upper); \
12404 (void)(a_fDisHints); \
12405 (void)(a_fIemHints); \
12406 } while (0)
12407
12408# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12409 do { \
12410 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12411 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12412 (void)RT_CONCAT(OP_,a_Upper); \
12413 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12414 (void)(a_fDisHints); \
12415 (void)(a_fIemHints); \
12416 } while (0)
12417
12418# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12419 do { \
12420 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12421 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12422 (void)RT_CONCAT(OP_,a_Upper); \
12423 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12424 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12425 (void)(a_fDisHints); \
12426 (void)(a_fIemHints); \
12427 } while (0)
12428
12429# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12430 do { \
12431 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12432 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12433 (void)RT_CONCAT(OP_,a_Upper); \
12434 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12435 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12436 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12437 (void)(a_fDisHints); \
12438 (void)(a_fIemHints); \
12439 } while (0)
12440
12441# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12442 do { \
12443 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12444 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12445 (void)RT_CONCAT(OP_,a_Upper); \
12446 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12447 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12448 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12449 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12450 (void)(a_fDisHints); \
12451 (void)(a_fIemHints); \
12452 } while (0)
12453
12454#else
12455# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12456
12457# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12458 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12459# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12460 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12461# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12462 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12463# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12464 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12465# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12466 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12467
12468#endif
12469
12470#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12471 IEMOP_MNEMONIC0EX(a_Lower, \
12472 #a_Lower, \
12473 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12474#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12475 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12476 #a_Lower " " #a_Op1, \
12477 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12478#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12479 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12480 #a_Lower " " #a_Op1 "," #a_Op2, \
12481 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12482#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12483 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12484 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12485 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12486#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12487 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12488 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12489 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
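
/*
 * The IEMOP_MNEMONIC* wrappers above build both the statistics member name and
 * the human-readable mnemonic from the same arguments, using token pasting
 * (RT_CONCAT*) and stringizing (#a_Lower).  A standalone sketch of those two
 * preprocessor mechanisms with local MY_* stand-ins for the IPRT helpers:
 *
 *   #include <stdio.h>
 *
 *   #define MY_CONCAT3(a, b, c)      MY_CONCAT3_HLP(a, b, c)
 *   #define MY_CONCAT3_HLP(a, b, c)  a##b##c
 *   #define MY_STRINGIFY(a)          #a
 *
 *   static unsigned MY_CONCAT3(add, _Eb, _Gb) = 0;   // expands to: static unsigned add_Eb_Gb = 0;
 *
 *   int main(void)
 *   {
 *       MY_CONCAT3(add, _Eb, _Gb) += 1;
 *       printf("%s -> %u\n", MY_STRINGIFY(add) " Eb,Gb", MY_CONCAT3(add, _Eb, _Gb));
 *       return 0;
 *   }
 */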
12490
12491/** @} */
12492
12493
12494/** @name Opcode Helpers.
12495 * @{
12496 */
12497
12498#ifdef IN_RING3
12499# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12500 do { \
12501 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12502 else \
12503 { \
12504 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12505 return IEMOP_RAISE_INVALID_OPCODE(); \
12506 } \
12507 } while (0)
12508#else
12509# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12510 do { \
12511 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12512 else return IEMOP_RAISE_INVALID_OPCODE(); \
12513 } while (0)
12514#endif
12515
12516/** The instruction requires a 186 or later. */
12517#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12518# define IEMOP_HLP_MIN_186() do { } while (0)
12519#else
12520# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12521#endif
12522
12523/** The instruction requires a 286 or later. */
12524#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12525# define IEMOP_HLP_MIN_286() do { } while (0)
12526#else
12527# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12528#endif
12529
12530/** The instruction requires a 386 or later. */
12531#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12532# define IEMOP_HLP_MIN_386() do { } while (0)
12533#else
12534# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12535#endif
12536
12537/** The instruction requires a 386 or later if the given expression is true. */
12538#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12539# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12540#else
12541# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12542#endif
12543
12544/** The instruction requires a 486 or later. */
12545#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12546# define IEMOP_HLP_MIN_486() do { } while (0)
12547#else
12548# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12549#endif
12550
12551/** The instruction requires a Pentium (586) or later. */
12552#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12553# define IEMOP_HLP_MIN_586() do { } while (0)
12554#else
12555# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12556#endif
12557
12558/** The instruction requires a PentiumPro (686) or later. */
12559#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12560# define IEMOP_HLP_MIN_686() do { } while (0)
12561#else
12562# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12563#endif
12564
12565
12566/** The instruction raises an \#UD in real and V8086 mode. */
12567#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12568 do \
12569 { \
12570 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12571 else return IEMOP_RAISE_INVALID_OPCODE(); \
12572 } while (0)
12573
12574#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12575/** This instruction raises an \#UD in real and V8086 mode, or in long mode when
12576 * not using a 64-bit code segment (applicable to all VMX instructions
12577 * except VMCALL). */
12578# define IEMOP_HLP_VMX_INSTR() \
12579 do \
12580 { \
12581 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12582 && ( !IEM_IS_LONG_MODE(pVCpu) \
12583 || IEM_IS_64BIT_CODE(pVCpu))) \
12584 { /* likely */ } \
12585 else \
12586 return IEMOP_RAISE_INVALID_OPCODE(); \
12587 } while (0)
12588
12589/** The instruction can only be executed in VMX operation (VMX root mode and
12590 * non-root mode).
12591 */
12592# define IEMOP_HLP_IN_VMX_OPERATION() \
12593 do \
12594 { \
12595 if (IEM_IS_VMX_ROOT_MODE(pVCpu)) { /* likely */ } \
12596 else return IEMOP_RAISE_INVALID_OPCODE(); \
12597 } while (0)
12598#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12599
12600/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12601 * 64-bit mode. */
12602#define IEMOP_HLP_NO_64BIT() \
12603 do \
12604 { \
12605 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12606 return IEMOP_RAISE_INVALID_OPCODE(); \
12607 } while (0)
12608
12609/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12610 * 64-bit mode. */
12611#define IEMOP_HLP_ONLY_64BIT() \
12612 do \
12613 { \
12614 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12615 return IEMOP_RAISE_INVALID_OPCODE(); \
12616 } while (0)
12617
12618/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12619#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12620 do \
12621 { \
12622 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12623 iemRecalEffOpSize64Default(pVCpu); \
12624 } while (0)
12625
12626/** The instruction has 64-bit operand size if 64-bit mode. */
12627#define IEMOP_HLP_64BIT_OP_SIZE() \
12628 do \
12629 { \
12630 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12631 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12632 } while (0)
12633
12634/** Only a REX prefix immediately preceding the first opcode byte takes
12635 * effect. This macro helps ensure this as well as log bad guest code. */
12636#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12637 do \
12638 { \
12639 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12640 { \
12641 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12642 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12643 pVCpu->iem.s.uRexB = 0; \
12644 pVCpu->iem.s.uRexIndex = 0; \
12645 pVCpu->iem.s.uRexReg = 0; \
12646 iemRecalEffOpSize(pVCpu); \
12647 } \
12648 } while (0)
12649
12650/**
12651 * Done decoding.
12652 */
12653#define IEMOP_HLP_DONE_DECODING() \
12654 do \
12655 { \
12656 /*nothing for now, maybe later... */ \
12657 } while (0)
12658
12659/**
12660 * Done decoding, raise \#UD exception if lock prefix present.
12661 */
12662#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12663 do \
12664 { \
12665 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12666 { /* likely */ } \
12667 else \
12668 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12669 } while (0)
12670
12671
12672/**
12673 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12674 * repnz or size prefixes are present, or if in real or v8086 mode.
12675 */
12676#define IEMOP_HLP_DONE_VEX_DECODING() \
12677 do \
12678 { \
12679 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12680 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12681 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12682 { /* likely */ } \
12683 else \
12684 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12685 } while (0)
12686
12687/**
12688 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12689 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12690 */
12691#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12692 do \
12693 { \
12694 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12695 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12696 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12697 && pVCpu->iem.s.uVexLength == 0)) \
12698 { /* likely */ } \
12699 else \
12700 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12701 } while (0)
12702
12703
12704/**
12705 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12706 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12707 * register 0, or if in real or v8086 mode.
12708 */
12709#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12710 do \
12711 { \
12712 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12713 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12714 && !pVCpu->iem.s.uVex3rdReg \
12715 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12716 { /* likely */ } \
12717 else \
12718 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12719 } while (0)
12720
12721/**
12722 * Done decoding VEX, no V, L=0.
12723 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12724 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12725 */
12726#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12727 do \
12728 { \
12729 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12730 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12731 && pVCpu->iem.s.uVexLength == 0 \
12732 && pVCpu->iem.s.uVex3rdReg == 0 \
12733 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12734 { /* likely */ } \
12735 else \
12736 return IEMOP_RAISE_INVALID_OPCODE(); \
12737 } while (0)
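
/*
 * The VEX checks above look at the decoded prefix fields: uVexLength is VEX.L
 * and uVex3rdReg is VEX.vvvv, which is stored one's complemented in the prefix
 * byte (so "no register" is encoded as 1111b and decodes to 0).  A standalone
 * sketch that extracts the two fields from the second byte of a 2-byte VEX
 * prefix (C5 xx), without any IEM decoder state:
 *
 *   #include <stdint.h>
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *       // Layout of the byte after 0xC5: bit 7 = ~R, bits 6:3 = ~vvvv, bit 2 = L, bits 1:0 = pp.
 *       uint8_t bVex2 = 0xf8;                             // ~R=1, ~vvvv=1111, L=0, pp=00
 *       uint8_t uVvvv = (uint8_t)(~(bVex2 >> 3) & 0xf);   // undo the one's complement
 *       uint8_t uL    = (bVex2 >> 2) & 1;
 *       printf("vvvv=%u (0 = no register), L=%u\n", uVvvv, uL);
 *       return 0;
 *   }
 */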
12738
12739#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12740 do \
12741 { \
12742 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12743 { /* likely */ } \
12744 else \
12745 { \
12746 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12747 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12748 } \
12749 } while (0)
12750#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12751 do \
12752 { \
12753 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12754 { /* likely */ } \
12755 else \
12756 { \
12757 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12758 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12759 } \
12760 } while (0)
12761
12762/**
12763 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12764 * are present.
12765 */
12766#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12767 do \
12768 { \
12769 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12770 { /* likely */ } \
12771 else \
12772 return IEMOP_RAISE_INVALID_OPCODE(); \
12773 } while (0)
12774
12775/**
12776 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12777 * prefixes are present.
12778 */
12779#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12780 do \
12781 { \
12782 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12783 { /* likely */ } \
12784 else \
12785 return IEMOP_RAISE_INVALID_OPCODE(); \
12786 } while (0)
12787
12788
12789/**
12790 * Calculates the effective address of a ModR/M memory operand.
12791 *
12792 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12793 *
12794 * @return Strict VBox status code.
12795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12796 * @param bRm The ModRM byte.
12797 * @param cbImm The size of any immediate following the
12798 * effective address opcode bytes. Important for
12799 * RIP relative addressing.
12800 * @param pGCPtrEff Where to return the effective address.
12801 */
12802IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12803{
12804 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12805# define SET_SS_DEF() \
12806 do \
12807 { \
12808 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12809 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12810 } while (0)
12811
12812 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12813 {
12814/** @todo Check the effective address size crap! */
12815 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12816 {
12817 uint16_t u16EffAddr;
12818
12819 /* Handle the disp16 form with no registers first. */
12820 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12821 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12822 else
12823 {
12824 /* Get the displacement. */
12825 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12826 {
12827 case 0: u16EffAddr = 0; break;
12828 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12829 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12830 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12831 }
12832
12833 /* Add the base and index registers to the disp. */
12834 switch (bRm & X86_MODRM_RM_MASK)
12835 {
12836 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12837 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12838 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12839 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12840 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12841 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12842 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12843 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12844 }
12845 }
12846
12847 *pGCPtrEff = u16EffAddr;
12848 }
12849 else
12850 {
12851 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12852 uint32_t u32EffAddr;
12853
12854 /* Handle the disp32 form with no registers first. */
12855 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12856 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12857 else
12858 {
12859 /* Get the register (or SIB) value. */
12860 switch ((bRm & X86_MODRM_RM_MASK))
12861 {
12862 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12863 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12864 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12865 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12866 case 4: /* SIB */
12867 {
12868 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12869
12870 /* Get the index and scale it. */
12871 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12872 {
12873 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12874 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12875 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12876 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12877 case 4: u32EffAddr = 0; /*none */ break;
12878 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12879 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12880 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12881 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12882 }
12883 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12884
12885 /* add base */
12886 switch (bSib & X86_SIB_BASE_MASK)
12887 {
12888 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12889 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12890 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12891 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12892 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12893 case 5:
12894 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12895 {
12896 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12897 SET_SS_DEF();
12898 }
12899 else
12900 {
12901 uint32_t u32Disp;
12902 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12903 u32EffAddr += u32Disp;
12904 }
12905 break;
12906 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12907 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12908 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12909 }
12910 break;
12911 }
12912 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12913 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12914 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12915 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12916 }
12917
12918 /* Get and add the displacement. */
12919 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12920 {
12921 case 0:
12922 break;
12923 case 1:
12924 {
12925 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12926 u32EffAddr += i8Disp;
12927 break;
12928 }
12929 case 2:
12930 {
12931 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12932 u32EffAddr += u32Disp;
12933 break;
12934 }
12935 default:
12936 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12937 }
12938
12939 }
12940 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12941 *pGCPtrEff = u32EffAddr;
12942 else
12943 {
12944 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12945 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12946 }
12947 }
12948 }
12949 else
12950 {
12951 uint64_t u64EffAddr;
12952
12953 /* Handle the rip+disp32 form with no registers first. */
12954 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12955 {
12956 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12957 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12958 }
12959 else
12960 {
12961 /* Get the register (or SIB) value. */
12962 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12963 {
12964 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12965 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12966 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12967 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12968 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
12969 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
12970 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
12971 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
12972 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
12973 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
12974 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
12975 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
12976 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
12977 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
12978 /* SIB */
12979 case 4:
12980 case 12:
12981 {
12982 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12983
12984 /* Get the index and scale it. */
12985 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12986 {
12987 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12988 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12989 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12990 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12991 case 4: u64EffAddr = 0; /*none */ break;
12992 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
12993 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
12994 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
12995 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
12996 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
12997 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
12998 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
12999 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13000 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13001 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13002 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13003 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13004 }
13005 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13006
13007 /* add base */
13008 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13009 {
13010 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13011 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13012 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13013 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13014 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13015 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13016 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13017 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13018 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13019 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13020 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13021 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13022 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13023 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13024 /* complicated encodings */
13025 case 5:
13026 case 13:
13027 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13028 {
13029 if (!pVCpu->iem.s.uRexB)
13030 {
13031 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13032 SET_SS_DEF();
13033 }
13034 else
13035 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13036 }
13037 else
13038 {
13039 uint32_t u32Disp;
13040 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13041 u64EffAddr += (int32_t)u32Disp;
13042 }
13043 break;
13044 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13045 }
13046 break;
13047 }
13048 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13049 }
13050
13051 /* Get and add the displacement. */
13052 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13053 {
13054 case 0:
13055 break;
13056 case 1:
13057 {
13058 int8_t i8Disp;
13059 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13060 u64EffAddr += i8Disp;
13061 break;
13062 }
13063 case 2:
13064 {
13065 uint32_t u32Disp;
13066 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13067 u64EffAddr += (int32_t)u32Disp;
13068 break;
13069 }
13070 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13071 }
13072
13073 }
13074
13075 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13076 *pGCPtrEff = u64EffAddr;
13077 else
13078 {
13079 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13080 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13081 }
13082 }
13083
13084 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13085 return VINF_SUCCESS;
13086}
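
/*
 * The 16-bit branch of the function above is compact enough to restate as a
 * standalone calculator.  This sketch mirrors the same mod/rm dispatch on
 * plain register values; the displacement is passed in already fetched and
 * sign-extended instead of being read from the opcode stream, and mod == 3
 * (register operand) is not handled, just as the real function is never
 * called with it.  Hypothetical MY* names, not part of the IEM API.
 *
 *   #include <stdint.h>
 *
 *   typedef struct MYREGS16 { uint16_t bx, bp, si, di; } MYREGS16;
 *
 *   static uint16_t myCalc16BitEffAddr(MYREGS16 const *pRegs, uint8_t bRm, int16_t i16Disp)
 *   {
 *       uint8_t const iMod = (bRm >> 6) & 3;
 *       uint8_t const iRm  = bRm & 7;
 *
 *       if (iMod == 0 && iRm == 6)               // mod=0, rm=6: disp16 only
 *           return (uint16_t)i16Disp;
 *
 *       uint16_t uEffAddr = iMod == 0 ? 0 : (uint16_t)i16Disp;
 *       switch (iRm)
 *       {
 *           case 0: uEffAddr += pRegs->bx + pRegs->si; break;
 *           case 1: uEffAddr += pRegs->bx + pRegs->di; break;
 *           case 2: uEffAddr += pRegs->bp + pRegs->si; break;  // defaults to SS
 *           case 3: uEffAddr += pRegs->bp + pRegs->di; break;  // defaults to SS
 *           case 4: uEffAddr += pRegs->si; break;
 *           case 5: uEffAddr += pRegs->di; break;
 *           case 6: uEffAddr += pRegs->bp; break;              // defaults to SS
 *           case 7: uEffAddr += pRegs->bx; break;
 *       }
 *       return uEffAddr;
 *   }
 */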
13087
13088
13089/**
13090 * Calculates the effective address of a ModR/M memory operand.
13091 *
13092 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13093 *
13094 * @return Strict VBox status code.
13095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13096 * @param bRm The ModRM byte.
13097 * @param cbImm The size of any immediate following the
13098 * effective address opcode bytes. Important for
13099 * RIP relative addressing.
13100 * @param pGCPtrEff Where to return the effective address.
13101 * @param offRsp RSP displacement.
13102 */
13103IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13104{
13105 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13106# define SET_SS_DEF() \
13107 do \
13108 { \
13109 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13110 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13111 } while (0)
13112
13113 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13114 {
13115/** @todo Check the effective address size crap! */
13116 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13117 {
13118 uint16_t u16EffAddr;
13119
13120 /* Handle the disp16 form with no registers first. */
13121 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13122 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13123 else
13124 {
13125 /* Get the displacement. */
13126 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13127 {
13128 case 0: u16EffAddr = 0; break;
13129 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13130 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13131 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13132 }
13133
13134 /* Add the base and index registers to the disp. */
13135 switch (bRm & X86_MODRM_RM_MASK)
13136 {
13137 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13138 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13139 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13140 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13141 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13142 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13143 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13144 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13145 }
13146 }
13147
13148 *pGCPtrEff = u16EffAddr;
13149 }
13150 else
13151 {
13152 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13153 uint32_t u32EffAddr;
13154
13155 /* Handle the disp32 form with no registers first. */
13156 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13157 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13158 else
13159 {
13160 /* Get the register (or SIB) value. */
13161 switch ((bRm & X86_MODRM_RM_MASK))
13162 {
13163 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13164 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13165 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13166 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13167 case 4: /* SIB */
13168 {
13169 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13170
13171 /* Get the index and scale it. */
13172 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13173 {
13174 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13175 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13176 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13177 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13178 case 4: u32EffAddr = 0; /*none */ break;
13179 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13180 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13181 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13182 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13183 }
13184 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13185
13186 /* add base */
13187 switch (bSib & X86_SIB_BASE_MASK)
13188 {
13189 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13190 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13191 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13192 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13193 case 4:
13194 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13195 SET_SS_DEF();
13196 break;
13197 case 5:
13198 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13199 {
13200 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13201 SET_SS_DEF();
13202 }
13203 else
13204 {
13205 uint32_t u32Disp;
13206 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13207 u32EffAddr += u32Disp;
13208 }
13209 break;
13210 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13211 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13212 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13213 }
13214 break;
13215 }
13216 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13217 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13218 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13219 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13220 }
13221
13222 /* Get and add the displacement. */
13223 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13224 {
13225 case 0:
13226 break;
13227 case 1:
13228 {
13229 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13230 u32EffAddr += i8Disp;
13231 break;
13232 }
13233 case 2:
13234 {
13235 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13236 u32EffAddr += u32Disp;
13237 break;
13238 }
13239 default:
13240 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13241 }
13242
13243 }
13244 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13245 *pGCPtrEff = u32EffAddr;
13246 else
13247 {
13248 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13249 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13250 }
13251 }
13252 }
13253 else
13254 {
13255 uint64_t u64EffAddr;
13256
13257 /* Handle the rip+disp32 form with no registers first. */
13258 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13259 {
13260 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13261 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13262 }
13263 else
13264 {
13265 /* Get the register (or SIB) value. */
13266 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13267 {
13268 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13269 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13270 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13271 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13272 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13273 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13274 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13275 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13276 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13277 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13278 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13279 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13280 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13281 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13282 /* SIB */
13283 case 4:
13284 case 12:
13285 {
13286 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13287
13288 /* Get the index and scale it. */
13289 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13290 {
13291 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13292 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13293 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13294 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13295 case 4: u64EffAddr = 0; /*none */ break;
13296 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13297 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13298 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13299 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13300 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13301 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13302 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13303 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13304 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13305 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13306 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13307 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13308 }
13309 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13310
13311 /* add base */
13312 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13313 {
13314 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13315 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13316 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13317 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13318 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13319 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13320 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13321 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13322 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13323 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13324 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13325 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13326 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13327 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13328 /* complicated encodings */
13329 case 5:
13330 case 13:
13331 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13332 {
13333 if (!pVCpu->iem.s.uRexB)
13334 {
13335 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13336 SET_SS_DEF();
13337 }
13338 else
13339 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13340 }
13341 else
13342 {
13343 uint32_t u32Disp;
13344 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13345 u64EffAddr += (int32_t)u32Disp;
13346 }
13347 break;
13348 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13349 }
13350 break;
13351 }
13352 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13353 }
13354
13355 /* Get and add the displacement. */
13356 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13357 {
13358 case 0:
13359 break;
13360 case 1:
13361 {
13362 int8_t i8Disp;
13363 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13364 u64EffAddr += i8Disp;
13365 break;
13366 }
13367 case 2:
13368 {
13369 uint32_t u32Disp;
13370 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13371 u64EffAddr += (int32_t)u32Disp;
13372 break;
13373 }
13374 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13375 }
13376
13377 }
13378
13379 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13380 *pGCPtrEff = u64EffAddr;
13381 else
13382 {
13383 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13384 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13385 }
13386 }
13387
13388 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13389 return VINF_SUCCESS;
13390}
13391
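/*
 * Illustrative sketch only (not used by the decoder): how the ModR/M and SIB
 * fields consumed by the helpers above break down for one hypothetical byte
 * pair.  The masks/shifts are the same X86_MODRM_* / X86_SIB_* ones used
 * above; the byte values and the iemSketch* function name are made up.
 */
#if 0
static void iemSketchModRmSibFields(void)
{
    uint8_t const bRm    = 0x44; /* mod=01 (disp8 follows), reg=000, rm=100 (SIB byte follows) */
    uint8_t const bSib   = 0x98; /* scale=10, index=011 (EBX), base=000 (EAX) */
    uint8_t const uMod   = (bRm  >> X86_MODRM_MOD_SHIFT)  & X86_MODRM_MOD_SMASK;  /* = 1 */
    uint8_t const uRm    =  bRm  & X86_MODRM_RM_MASK;                             /* = 4 */
    uint8_t const uScale = (bSib >> X86_SIB_SCALE_SHIFT)  & X86_SIB_SCALE_SMASK;  /* = 2, i.e. index * 4 */
    uint8_t const uIndex = (bSib >> X86_SIB_INDEX_SHIFT)  & X86_SIB_INDEX_SMASK;  /* = 3 -> EBX */
    uint8_t const uBase  =  bSib & X86_SIB_BASE_MASK;                             /* = 0 -> EAX */
    /* With 32-bit addressing this yields EA = EAX + (EBX << 2) + disp8, matching
       the "scale index, add base, add displacement" cascade above. */
    Log5(("sketch: mod=%u rm=%u scale=%u index=%u base=%u\n", uMod, uRm, uScale, uIndex, uBase));
}
#endif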
13392
13393#ifdef IEM_WITH_SETJMP
13394/**
13395 * Calculates the effective address of a ModR/M memory operand.
13396 *
13397 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13398 *
13399 * May longjmp on internal error.
13400 *
13401 * @return The effective address.
13402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13403 * @param bRm The ModRM byte.
13404 * @param cbImm The size of any immediate following the
13405 * effective address opcode bytes. Important for
13406 * RIP relative addressing.
13407 */
13408IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13409{
13410 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13411# define SET_SS_DEF() \
13412 do \
13413 { \
13414 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13415 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13416 } while (0)
13417
13418 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13419 {
13420/** @todo Check the effective address size crap! */
13421 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13422 {
13423 uint16_t u16EffAddr;
13424
13425 /* Handle the disp16 form with no registers first. */
13426 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13427 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13428 else
13429 {
13430 /* Get the displacement. */
13431 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13432 {
13433 case 0: u16EffAddr = 0; break;
13434 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13435 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13436 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13437 }
13438
13439 /* Add the base and index registers to the disp. */
13440 switch (bRm & X86_MODRM_RM_MASK)
13441 {
13442 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13443 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13444 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13445 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13446 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13447 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13448 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13449 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13450 }
13451 }
13452
13453 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13454 return u16EffAddr;
13455 }
13456
13457 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13458 uint32_t u32EffAddr;
13459
13460 /* Handle the disp32 form with no registers first. */
13461 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13462 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13463 else
13464 {
13465 /* Get the register (or SIB) value. */
13466 switch ((bRm & X86_MODRM_RM_MASK))
13467 {
13468 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13469 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13470 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13471 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13472 case 4: /* SIB */
13473 {
13474 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13475
13476 /* Get the index and scale it. */
13477 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13478 {
13479 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13480 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13481 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13482 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13483 case 4: u32EffAddr = 0; /*none */ break;
13484 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13485 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13486 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13487 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13488 }
13489 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13490
13491 /* add base */
13492 switch (bSib & X86_SIB_BASE_MASK)
13493 {
13494 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13495 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13496 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13497 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13498 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13499 case 5:
13500 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13501 {
13502 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13503 SET_SS_DEF();
13504 }
13505 else
13506 {
13507 uint32_t u32Disp;
13508 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13509 u32EffAddr += u32Disp;
13510 }
13511 break;
13512 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13513 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13514 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13515 }
13516 break;
13517 }
13518 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13519 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13520 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13521 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13522 }
13523
13524 /* Get and add the displacement. */
13525 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13526 {
13527 case 0:
13528 break;
13529 case 1:
13530 {
13531 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13532 u32EffAddr += i8Disp;
13533 break;
13534 }
13535 case 2:
13536 {
13537 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13538 u32EffAddr += u32Disp;
13539 break;
13540 }
13541 default:
13542 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13543 }
13544 }
13545
13546 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13547 {
13548 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13549 return u32EffAddr;
13550 }
13551 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13552 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13553 return u32EffAddr & UINT16_MAX;
13554 }
13555
13556 uint64_t u64EffAddr;
13557
13558 /* Handle the rip+disp32 form with no registers first. */
13559 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13560 {
13561 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13562 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13563 }
13564 else
13565 {
13566 /* Get the register (or SIB) value. */
13567 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13568 {
13569 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13570 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13571 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13572 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13573 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13574 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13575 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13576 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13577 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13578 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13579 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13580 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13581 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13582 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13583 /* SIB */
13584 case 4:
13585 case 12:
13586 {
13587 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13588
13589 /* Get the index and scale it. */
13590 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13591 {
13592 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13593 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13594 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13595 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13596 case 4: u64EffAddr = 0; /*none */ break;
13597 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13598 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13599 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13600 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13601 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13602 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13603 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13604 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13605 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13606 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13607 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13608 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13609 }
13610 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13611
13612 /* add base */
13613 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13614 {
13615 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13616 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13617 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13618 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13619 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13620 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13621 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13622 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13623 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13624 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13625 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13626 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13627 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13628 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13629 /* complicated encodings */
13630 case 5:
13631 case 13:
13632 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13633 {
13634 if (!pVCpu->iem.s.uRexB)
13635 {
13636 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13637 SET_SS_DEF();
13638 }
13639 else
13640 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13641 }
13642 else
13643 {
13644 uint32_t u32Disp;
13645 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13646 u64EffAddr += (int32_t)u32Disp;
13647 }
13648 break;
13649 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13650 }
13651 break;
13652 }
13653 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13654 }
13655
13656 /* Get and add the displacement. */
13657 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13658 {
13659 case 0:
13660 break;
13661 case 1:
13662 {
13663 int8_t i8Disp;
13664 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13665 u64EffAddr += i8Disp;
13666 break;
13667 }
13668 case 2:
13669 {
13670 uint32_t u32Disp;
13671 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13672 u64EffAddr += (int32_t)u32Disp;
13673 break;
13674 }
13675 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13676 }
13677
13678 }
13679
13680 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13681 {
13682 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13683 return u64EffAddr;
13684 }
13685 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13686 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13687 return u64EffAddr & UINT32_MAX;
13688}
13689#endif /* IEM_WITH_SETJMP */
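/*
 * Hedged sketch of how callers are expected to deal with the longjmp-style
 * error reporting used by the Jmp variant above (the real pattern can be seen
 * in iemExecOneInner further down); bRm, cbImm and the iemSketch* function
 * name are made up for illustration.
 */
#if 0
static void iemSketchCalcRmEffAddrJmpCaller(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
{
    jmp_buf      JmpBuf;
    jmp_buf     *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
    pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
    RTGCPTR      GCPtrEff = NIL_RTGCPTR;
    VBOXSTRICTRC rcStrict;
    if ((rcStrict = setjmp(JmpBuf)) == 0)
        GCPtrEff = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, cbImm); /* longjmps with a VERR_IEM_IPE_* code on internal error */
    else
        pVCpu->iem.s.cLongJumps++;
    pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
    Log5(("sketch: GCPtrEff=%RGv rc=%Rrc\n", GCPtrEff, VBOXSTRICTRC_VAL(rcStrict)));
}
#endif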
13690
13691/** @} */
13692
13693
13694
13695/*
13696 * Include the instructions
13697 */
13698#include "IEMAllInstructions.cpp.h"
13699
13700
13701
13702#ifdef LOG_ENABLED
13703/**
13704 * Logs the current instruction.
13705 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13706 * @param fSameCtx Set if we have the same context information as the VMM,
13707 * clear if we may have already executed an instruction in
13708 * our debug context. When clear, we assume IEMCPU holds
13709 * valid CPU mode info.
13710 *
13711 * The @a fSameCtx parameter is now misleading and obsolete.
13712 * @param pszFunction The IEM function doing the execution.
13713 */
13714IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13715{
13716# ifdef IN_RING3
13717 if (LogIs2Enabled())
13718 {
13719 char szInstr[256];
13720 uint32_t cbInstr = 0;
13721 if (fSameCtx)
13722 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13723 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13724 szInstr, sizeof(szInstr), &cbInstr);
13725 else
13726 {
13727 uint32_t fFlags = 0;
13728 switch (pVCpu->iem.s.enmCpuMode)
13729 {
13730 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13731 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13732 case IEMMODE_16BIT:
13733 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13734 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13735 else
13736 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13737 break;
13738 }
13739 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13740 szInstr, sizeof(szInstr), &cbInstr);
13741 }
13742
13743 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13744 Log2(("**** %s\n"
13745 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13746 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13747 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13748 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13749 " %s\n"
13750 , pszFunction,
13751 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13752 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13753 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13754 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13755 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13756 szInstr));
13757
13758 if (LogIs3Enabled())
13759 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13760 }
13761 else
13762# endif
13763 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13764 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13765 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13766}
13767#endif /* LOG_ENABLED */
13768
13769
13770/**
13771 * Makes status code adjustments (pass up from I/O and access handler)
13772 * as well as maintaining statistics.
13773 *
13774 * @returns Strict VBox status code to pass up.
13775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13776 * @param rcStrict The status from executing an instruction.
13777 */
13778DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13779{
13780 if (rcStrict != VINF_SUCCESS)
13781 {
13782 if (RT_SUCCESS(rcStrict))
13783 {
13784 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13785 || rcStrict == VINF_IOM_R3_IOPORT_READ
13786 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13787 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13788 || rcStrict == VINF_IOM_R3_MMIO_READ
13789 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13790 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13791 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13792 || rcStrict == VINF_CPUM_R3_MSR_READ
13793 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13794 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13795 || rcStrict == VINF_EM_RAW_TO_R3
13796 || rcStrict == VINF_EM_TRIPLE_FAULT
13797 || rcStrict == VINF_GIM_R3_HYPERCALL
13798 /* raw-mode / virt handlers only: */
13799 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13800 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13801 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13802 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13803 || rcStrict == VINF_SELM_SYNC_GDT
13804 || rcStrict == VINF_CSAM_PENDING_ACTION
13805 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13806 /* nested hw.virt codes: */
13807 || rcStrict == VINF_SVM_VMEXIT
13808 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13809/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13810 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13811#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13812 if ( rcStrict == VINF_SVM_VMEXIT
13813 && rcPassUp == VINF_SUCCESS)
13814 rcStrict = VINF_SUCCESS;
13815 else
13816#endif
13817 if (rcPassUp == VINF_SUCCESS)
13818 pVCpu->iem.s.cRetInfStatuses++;
13819 else if ( rcPassUp < VINF_EM_FIRST
13820 || rcPassUp > VINF_EM_LAST
13821 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13822 {
13823 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13824 pVCpu->iem.s.cRetPassUpStatus++;
13825 rcStrict = rcPassUp;
13826 }
13827 else
13828 {
13829 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13830 pVCpu->iem.s.cRetInfStatuses++;
13831 }
13832 }
13833 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13834 pVCpu->iem.s.cRetAspectNotImplemented++;
13835 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13836 pVCpu->iem.s.cRetInstrNotImplemented++;
13837 else
13838 pVCpu->iem.s.cRetErrStatuses++;
13839 }
13840 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13841 {
13842 pVCpu->iem.s.cRetPassUpStatus++;
13843 rcStrict = pVCpu->iem.s.rcPassUp;
13844 }
13845
13846 return rcStrict;
13847}
13848
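/*
 * Worked example of the pass-up rule above (values only, nothing executed):
 * when the instruction itself returned VINF_SUCCESS but an access handler
 * stashed e.g. VINF_EM_RAW_TO_R3 in pVCpu->iem.s.rcPassUp, the final else-if
 * promotes that pass-up status, so the caller sees VINF_EM_RAW_TO_R3 and
 * cRetPassUpStatus is bumped.
 */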
13849
13850/**
13851 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13852 * IEMExecOneWithPrefetchedByPC.
13853 *
13854 * Similar code is found in IEMExecLots.
13855 *
13856 * @return Strict VBox status code.
13857 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13858 * @param fExecuteInhibit If set, execute the instruction following CLI,
13859 * POP SS and MOV SS,GR.
13860 * @param pszFunction The calling function name.
13861 */
13862DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
13863{
13864 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13865 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13866 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13867 RT_NOREF_PV(pszFunction);
13868
13869#ifdef IEM_WITH_SETJMP
13870 VBOXSTRICTRC rcStrict;
13871 jmp_buf JmpBuf;
13872 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13873 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13874 if ((rcStrict = setjmp(JmpBuf)) == 0)
13875 {
13876 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13877 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13878 }
13879 else
13880 pVCpu->iem.s.cLongJumps++;
13881 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13882#else
13883 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13884 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13885#endif
13886 if (rcStrict == VINF_SUCCESS)
13887 pVCpu->iem.s.cInstructions++;
13888 if (pVCpu->iem.s.cActiveMappings > 0)
13889 {
13890 Assert(rcStrict != VINF_SUCCESS);
13891 iemMemRollback(pVCpu);
13892 }
13893 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13894 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13895 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13896
13897//#ifdef DEBUG
13898// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13899//#endif
13900
13901 /* Execute the next instruction as well if a cli, pop ss or
13902 mov ss, Gr has just completed successfully. */
13903 if ( fExecuteInhibit
13904 && rcStrict == VINF_SUCCESS
13905 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13906 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
13907 {
13908 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13909 if (rcStrict == VINF_SUCCESS)
13910 {
13911#ifdef LOG_ENABLED
13912 iemLogCurInstr(pVCpu, false, pszFunction);
13913#endif
13914#ifdef IEM_WITH_SETJMP
13915 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13916 if ((rcStrict = setjmp(JmpBuf)) == 0)
13917 {
13918 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13919 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13920 }
13921 else
13922 pVCpu->iem.s.cLongJumps++;
13923 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13924#else
13925 IEM_OPCODE_GET_NEXT_U8(&b);
13926 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13927#endif
13928 if (rcStrict == VINF_SUCCESS)
13929 pVCpu->iem.s.cInstructions++;
13930 if (pVCpu->iem.s.cActiveMappings > 0)
13931 {
13932 Assert(rcStrict != VINF_SUCCESS);
13933 iemMemRollback(pVCpu);
13934 }
13935 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13936 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13937 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13938 }
13939 else if (pVCpu->iem.s.cActiveMappings > 0)
13940 iemMemRollback(pVCpu);
13941 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13942 }
13943
13944 /*
13945 * Return value fiddling, statistics and sanity assertions.
13946 */
13947 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13948
13949 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
13950 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
13951 return rcStrict;
13952}
13953
13954
13955#ifdef IN_RC
13956/**
13957 * Re-enters raw-mode or ensure we return to ring-3.
13958 *
13959 * @returns rcStrict, maybe modified.
13960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13961 * @param rcStrict The status code returned by the interpreter.
13962 */
13963DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13964{
13965 if ( !pVCpu->iem.s.fInPatchCode
13966 && ( rcStrict == VINF_SUCCESS
13967 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13968 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13969 {
13970 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13971 CPUMRawEnter(pVCpu);
13972 else
13973 {
13974 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13975 rcStrict = VINF_EM_RESCHEDULE;
13976 }
13977 }
13978 return rcStrict;
13979}
13980#endif
13981
13982
13983/**
13984 * Execute one instruction.
13985 *
13986 * @return Strict VBox status code.
13987 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13988 */
13989VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13990{
13991#ifdef LOG_ENABLED
13992 iemLogCurInstr(pVCpu, true, "IEMExecOne");
13993#endif
13994
13995 /*
13996 * Do the decoding and emulation.
13997 */
13998 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13999 if (rcStrict == VINF_SUCCESS)
14000 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14001 else if (pVCpu->iem.s.cActiveMappings > 0)
14002 iemMemRollback(pVCpu);
14003
14004#ifdef IN_RC
14005 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14006#endif
14007 if (rcStrict != VINF_SUCCESS)
14008 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14009 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14010 return rcStrict;
14011}
14012
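/*
 * Minimal usage sketch (hedged, not lifted from EM): callers simply forward
 * the strict status and let the VMM dispatch any informational values.  The
 * iemSketch* function name is made up.
 */
#if 0
static VBOXSTRICTRC iemSketchExecOneUsage(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict != VINF_SUCCESS)
        Log(("sketch: IEMExecOne -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif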
14013
14014VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14015{
14016 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14017
14018 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14019 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14020 if (rcStrict == VINF_SUCCESS)
14021 {
14022 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14023 if (pcbWritten)
14024 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14025 }
14026 else if (pVCpu->iem.s.cActiveMappings > 0)
14027 iemMemRollback(pVCpu);
14028
14029#ifdef IN_RC
14030 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14031#endif
14032 return rcStrict;
14033}
14034
14035
14036VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14037 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14038{
14039 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14040
14041 VBOXSTRICTRC rcStrict;
14042 if ( cbOpcodeBytes
14043 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14044 {
14045 iemInitDecoder(pVCpu, false);
14046#ifdef IEM_WITH_CODE_TLB
14047 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14048 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14049 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14050 pVCpu->iem.s.offCurInstrStart = 0;
14051 pVCpu->iem.s.offInstrNextByte = 0;
14052#else
14053 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14054 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14055#endif
14056 rcStrict = VINF_SUCCESS;
14057 }
14058 else
14059 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14060 if (rcStrict == VINF_SUCCESS)
14061 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14062 else if (pVCpu->iem.s.cActiveMappings > 0)
14063 iemMemRollback(pVCpu);
14064
14065#ifdef IN_RC
14066 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14067#endif
14068 return rcStrict;
14069}
14070
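/*
 * Hedged sketch of the prefetched-bytes path above: when the caller already
 * holds the opcode bytes at the current RIP, the guest-memory prefetch is
 * skipped entirely.  The opcode byte and the iemSketch* name are made up.
 */
#if 0
static VBOXSTRICTRC iemSketchExecPrefetchedNop(PVMCPU pVCpu)
{
    static uint8_t const s_abNop[] = { 0x90 }; /* NOP */
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
                                        pVCpu->cpum.GstCtx.rip, s_abNop, sizeof(s_abNop));
}
#endif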
14071
14072VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14073{
14074 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14075
14076 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14077 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14078 if (rcStrict == VINF_SUCCESS)
14079 {
14080 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14081 if (pcbWritten)
14082 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14083 }
14084 else if (pVCpu->iem.s.cActiveMappings > 0)
14085 iemMemRollback(pVCpu);
14086
14087#ifdef IN_RC
14088 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14089#endif
14090 return rcStrict;
14091}
14092
14093
14094VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14095 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14096{
14097 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14098
14099 VBOXSTRICTRC rcStrict;
14100 if ( cbOpcodeBytes
14101 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14102 {
14103 iemInitDecoder(pVCpu, true);
14104#ifdef IEM_WITH_CODE_TLB
14105 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14106 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14107 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14108 pVCpu->iem.s.offCurInstrStart = 0;
14109 pVCpu->iem.s.offInstrNextByte = 0;
14110#else
14111 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14112 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14113#endif
14114 rcStrict = VINF_SUCCESS;
14115 }
14116 else
14117 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14118 if (rcStrict == VINF_SUCCESS)
14119 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14120 else if (pVCpu->iem.s.cActiveMappings > 0)
14121 iemMemRollback(pVCpu);
14122
14123#ifdef IN_RC
14124 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14125#endif
14126 return rcStrict;
14127}
14128
14129
14130/**
14131 * For debugging DISGetParamSize, may come in handy.
14132 *
14133 * @returns Strict VBox status code.
14134 * @param pVCpu The cross context virtual CPU structure of the
14135 * calling EMT.
14136 * @param pCtxCore The context core structure.
14137 * @param OpcodeBytesPC The PC of the opcode bytes.
14138 * @param pvOpcodeBytes Prefetched opcode bytes.
14139 * @param cbOpcodeBytes Number of prefetched bytes.
14140 * @param pcbWritten Where to return the number of bytes written.
14141 * Optional.
14142 */
14143VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14144 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14145 uint32_t *pcbWritten)
14146{
14147 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14148
14149 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14150 VBOXSTRICTRC rcStrict;
14151 if ( cbOpcodeBytes
14152 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14153 {
14154 iemInitDecoder(pVCpu, true);
14155#ifdef IEM_WITH_CODE_TLB
14156 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14157 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14158 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14159 pVCpu->iem.s.offCurInstrStart = 0;
14160 pVCpu->iem.s.offInstrNextByte = 0;
14161#else
14162 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14163 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14164#endif
14165 rcStrict = VINF_SUCCESS;
14166 }
14167 else
14168 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14169 if (rcStrict == VINF_SUCCESS)
14170 {
14171 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14172 if (pcbWritten)
14173 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14174 }
14175 else if (pVCpu->iem.s.cActiveMappings > 0)
14176 iemMemRollback(pVCpu);
14177
14178#ifdef IN_RC
14179 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14180#endif
14181 return rcStrict;
14182}
14183
14184
14185VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14186{
14187 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14188
14189 /*
14190 * See if there is an interrupt pending in TRPM, inject it if we can.
14191 */
14192 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14193#if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
14194 bool fIntrEnabled = pVCpu->cpum.GstCtx.hwvirt.fGif;
14195 if (fIntrEnabled)
14196 {
14197 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
14198 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, IEM_GET_CTX(pVCpu));
14199 else
14200 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14201 }
14202#else
14203 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14204#endif
14205 if ( fIntrEnabled
14206 && TRPMHasTrap(pVCpu)
14207 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14208 {
14209 uint8_t u8TrapNo;
14210 TRPMEVENT enmType;
14211 RTGCUINT uErrCode;
14212 RTGCPTR uCr2;
14213 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14214 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14215 TRPMResetTrap(pVCpu);
14216 }
14217
14218 /*
14219 * Initial decoder init w/ prefetch, then setup setjmp.
14220 */
14221 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14222 if (rcStrict == VINF_SUCCESS)
14223 {
14224#ifdef IEM_WITH_SETJMP
14225 jmp_buf JmpBuf;
14226 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14227 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14228 pVCpu->iem.s.cActiveMappings = 0;
14229 if ((rcStrict = setjmp(JmpBuf)) == 0)
14230#endif
14231 {
14232 /*
14233 * The run loop. We limit ourselves to 4096 instructions right now.
14234 */
14235 PVM pVM = pVCpu->CTX_SUFF(pVM);
14236 uint32_t cInstr = 4096;
14237 for (;;)
14238 {
14239 /*
14240 * Log the state.
14241 */
14242#ifdef LOG_ENABLED
14243 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14244#endif
14245
14246 /*
14247 * Do the decoding and emulation.
14248 */
14249 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14250 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14251 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14252 {
14253 Assert(pVCpu->iem.s.cActiveMappings == 0);
14254 pVCpu->iem.s.cInstructions++;
14255 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14256 {
14257 uint32_t fCpu = pVCpu->fLocalForcedActions
14258 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14259 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14260 | VMCPU_FF_TLB_FLUSH
14261#ifdef VBOX_WITH_RAW_MODE
14262 | VMCPU_FF_TRPM_SYNC_IDT
14263 | VMCPU_FF_SELM_SYNC_TSS
14264 | VMCPU_FF_SELM_SYNC_GDT
14265 | VMCPU_FF_SELM_SYNC_LDT
14266#endif
14267 | VMCPU_FF_INHIBIT_INTERRUPTS
14268 | VMCPU_FF_BLOCK_NMIS
14269 | VMCPU_FF_UNHALT ));
14270
14271 if (RT_LIKELY( ( !fCpu
14272 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14273 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14274 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14275 {
14276 if (cInstr-- > 0)
14277 {
14278 Assert(pVCpu->iem.s.cActiveMappings == 0);
14279 iemReInitDecoder(pVCpu);
14280 continue;
14281 }
14282 }
14283 }
14284 Assert(pVCpu->iem.s.cActiveMappings == 0);
14285 }
14286 else if (pVCpu->iem.s.cActiveMappings > 0)
14287 iemMemRollback(pVCpu);
14288 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14289 break;
14290 }
14291 }
14292#ifdef IEM_WITH_SETJMP
14293 else
14294 {
14295 if (pVCpu->iem.s.cActiveMappings > 0)
14296 iemMemRollback(pVCpu);
14297 pVCpu->iem.s.cLongJumps++;
14298 }
14299 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14300#endif
14301
14302 /*
14303 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14304 */
14305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14307 }
14308 else
14309 {
14310 if (pVCpu->iem.s.cActiveMappings > 0)
14311 iemMemRollback(pVCpu);
14312
14313#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14314 /*
14315 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14316 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14317 */
14318 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14319#endif
14320 }
14321
14322 /*
14323 * Maybe re-enter raw-mode and log.
14324 */
14325#ifdef IN_RC
14326 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14327#endif
14328 if (rcStrict != VINF_SUCCESS)
14329 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14330 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14331 if (pcInstructions)
14332 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14333 return rcStrict;
14334}
14335
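/*
 * Minimal usage sketch (hedged): run a batch of instructions and report how
 * many actually got executed.  The iemSketch* function name is made up.
 */
#if 0
static VBOXSTRICTRC iemSketchExecLotsUsage(PVMCPU pVCpu)
{
    uint32_t cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
    Log(("sketch: %u instructions executed, rc=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif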
14336
14337/**
14338 * Interface used by EMExecuteExec, does exit statistics and limits.
14339 *
14340 * @returns Strict VBox status code.
14341 * @param pVCpu The cross context virtual CPU structure.
14342 * @param fWillExit To be defined.
14343 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14344 * @param cMaxInstructions Maximum number of instructions to execute.
14345 * @param cMaxInstructionsWithoutExits
14346 * The max number of instructions without exits.
14347 * @param pStats Where to return statistics.
14348 */
14349VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14350 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14351{
14352 NOREF(fWillExit); /** @todo define flexible exit crits */
14353
14354 /*
14355 * Initialize return stats.
14356 */
14357 pStats->cInstructions = 0;
14358 pStats->cExits = 0;
14359 pStats->cMaxExitDistance = 0;
14360 pStats->cReserved = 0;
14361
14362 /*
14363 * Initial decoder init w/ prefetch, then setup setjmp.
14364 */
14365 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14366 if (rcStrict == VINF_SUCCESS)
14367 {
14368#ifdef IEM_WITH_SETJMP
14369 jmp_buf JmpBuf;
14370 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14371 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14372 pVCpu->iem.s.cActiveMappings = 0;
14373 if ((rcStrict = setjmp(JmpBuf)) == 0)
14374#endif
14375 {
14376#ifdef IN_RING0
14377 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14378#endif
14379 uint32_t cInstructionSinceLastExit = 0;
14380
14381 /*
14382 * The run loop. We limit ourselves to 4096 instructions right now.
14383 */
14384 PVM pVM = pVCpu->CTX_SUFF(pVM);
14385 for (;;)
14386 {
14387 /*
14388 * Log the state.
14389 */
14390#ifdef LOG_ENABLED
14391 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14392#endif
14393
14394 /*
14395 * Do the decoding and emulation.
14396 */
14397 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14398
14399 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14400 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14401
14402 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14403 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14404 {
14405 pStats->cExits += 1;
14406 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14407 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14408 cInstructionSinceLastExit = 0;
14409 }
14410
14411 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14412 {
14413 Assert(pVCpu->iem.s.cActiveMappings == 0);
14414 pVCpu->iem.s.cInstructions++;
14415 pStats->cInstructions++;
14416 cInstructionSinceLastExit++;
14417 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14418 {
14419 uint32_t fCpu = pVCpu->fLocalForcedActions
14420 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14421 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14422 | VMCPU_FF_TLB_FLUSH
14423#ifdef VBOX_WITH_RAW_MODE
14424 | VMCPU_FF_TRPM_SYNC_IDT
14425 | VMCPU_FF_SELM_SYNC_TSS
14426 | VMCPU_FF_SELM_SYNC_GDT
14427 | VMCPU_FF_SELM_SYNC_LDT
14428#endif
14429 | VMCPU_FF_INHIBIT_INTERRUPTS
14430 | VMCPU_FF_BLOCK_NMIS
14431 | VMCPU_FF_UNHALT ));
14432
14433 if (RT_LIKELY( ( ( !fCpu
14434 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14435 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14436 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) )
14437 || pStats->cInstructions < cMinInstructions))
14438 {
14439 if (pStats->cInstructions < cMaxInstructions)
14440 {
14441 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14442 {
14443#ifdef IN_RING0
14444 if ( !fCheckPreemptionPending
14445 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14446#endif
14447 {
14448 Assert(pVCpu->iem.s.cActiveMappings == 0);
14449 iemReInitDecoder(pVCpu);
14450 continue;
14451 }
14452#ifdef IN_RING0
14453 rcStrict = VINF_EM_RAW_INTERRUPT;
14454 break;
14455#endif
14456 }
14457 }
14458 }
14459 Assert(!(fCpu & VMCPU_FF_IEM));
14460 }
14461 Assert(pVCpu->iem.s.cActiveMappings == 0);
14462 }
14463 else if (pVCpu->iem.s.cActiveMappings > 0)
14464 iemMemRollback(pVCpu);
14465 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14466 break;
14467 }
14468 }
14469#ifdef IEM_WITH_SETJMP
14470 else
14471 {
14472 if (pVCpu->iem.s.cActiveMappings > 0)
14473 iemMemRollback(pVCpu);
14474 pVCpu->iem.s.cLongJumps++;
14475 }
14476 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14477#endif
14478
14479 /*
14480 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14481 */
14482 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14483 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14484 }
14485 else
14486 {
14487 if (pVCpu->iem.s.cActiveMappings > 0)
14488 iemMemRollback(pVCpu);
14489
14490#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14491 /*
14492 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14493 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14494 */
14495 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14496#endif
14497 }
14498
14499 /*
14500 * Maybe re-enter raw-mode and log.
14501 */
14502#ifdef IN_RC
14503 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14504#endif
14505 if (rcStrict != VINF_SUCCESS)
14506 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14507 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14508 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14509 return rcStrict;
14510}
14511
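/*
 * Hedged usage sketch for the exit-bounded executor above.  The instruction
 * limits are made-up numbers, the iemSketch* name is invented, and
 * IEMEXECFOREXITSTATS is assumed to be the struct behind PIEMEXECFOREXITSTATS;
 * only the parameter order and the stats fields come from the code above.
 */
#if 0
static VBOXSTRICTRC iemSketchExecForExitsUsage(PVMCPU pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit - not yet defined*/,
                                            1 /*cMinInstructions*/, 4096 /*cMaxInstructions*/,
                                            512 /*cMaxInstructionsWithoutExits*/, &Stats);
    Log(("sketch: rc=%Rrc ins=%u exits=%u maxdist=%u\n", VBOXSTRICTRC_VAL(rcStrict),
         Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
    return rcStrict;
}
#endif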
14512
14513/**
14514 * Injects a trap, fault, abort, software interrupt or external interrupt.
14515 *
14516 * The parameter list matches TRPMQueryTrapAll pretty closely.
14517 *
14518 * @returns Strict VBox status code.
14519 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14520 * @param u8TrapNo The trap number.
14521 * @param enmType What type is it (trap/fault/abort), software
14522 * interrupt or hardware interrupt.
14523 * @param uErrCode The error code if applicable.
14524 * @param uCr2 The CR2 value if applicable.
14525 * @param cbInstr The instruction length (only relevant for
14526 * software interrupts).
14527 */
14528VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14529 uint8_t cbInstr)
14530{
14531 iemInitDecoder(pVCpu, false);
14532#ifdef DBGFTRACE_ENABLED
14533 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14534 u8TrapNo, enmType, uErrCode, uCr2);
14535#endif
14536
14537 uint32_t fFlags;
14538 switch (enmType)
14539 {
14540 case TRPM_HARDWARE_INT:
14541 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14542 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14543 uErrCode = uCr2 = 0;
14544 break;
14545
14546 case TRPM_SOFTWARE_INT:
14547 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14548 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14549 uErrCode = uCr2 = 0;
14550 break;
14551
14552 case TRPM_TRAP:
14553 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14554 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14555 if (u8TrapNo == X86_XCPT_PF)
14556 fFlags |= IEM_XCPT_FLAGS_CR2;
14557 switch (u8TrapNo)
14558 {
14559 case X86_XCPT_DF:
14560 case X86_XCPT_TS:
14561 case X86_XCPT_NP:
14562 case X86_XCPT_SS:
14563 case X86_XCPT_PF:
14564 case X86_XCPT_AC:
14565 fFlags |= IEM_XCPT_FLAGS_ERR;
14566 break;
14567
14568 case X86_XCPT_NMI:
14569 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14570 break;
14571 }
14572 break;
14573
14574 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14575 }
14576
14577 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14578
14579 if (pVCpu->iem.s.cActiveMappings > 0)
14580 iemMemRollback(pVCpu);
14581
14582 return rcStrict;
14583}
14584
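/*
 * Hedged example: reflecting a page fault into the guest via the API above.
 * uErrCode and uCr2 would typically come from TRPM; the iemSketch* name is
 * made up.
 */
#if 0
static VBOXSTRICTRC iemSketchReflectPageFault(PVMCPU pVCpu, uint16_t uErrCode, RTGCPTR uCr2)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, uCr2, 0 /*cbInstr*/);
}
#endif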
14585
14586/**
14587 * Injects the active TRPM event.
14588 *
14589 * @returns Strict VBox status code.
14590 * @param pVCpu The cross context virtual CPU structure.
14591 */
14592VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14593{
14594#ifndef IEM_IMPLEMENTS_TASKSWITCH
14595 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14596#else
14597 uint8_t u8TrapNo;
14598 TRPMEVENT enmType;
14599 RTGCUINT uErrCode;
14600 RTGCUINTPTR uCr2;
14601 uint8_t cbInstr;
14602 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14603 if (RT_FAILURE(rc))
14604 return rc;
14605
14606 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14607# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14608 if (rcStrict == VINF_SVM_VMEXIT)
14609 rcStrict = VINF_SUCCESS;
14610# endif
14611
14612 /** @todo Are there any other codes that imply the event was successfully
14613 * delivered to the guest? See @bugref{6607}. */
14614 if ( rcStrict == VINF_SUCCESS
14615 || rcStrict == VINF_IEM_RAISED_XCPT)
14616 TRPMResetTrap(pVCpu);
14617
14618 return rcStrict;
14619#endif
14620}
14621
14622
14623VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14624{
14625 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14626 return VERR_NOT_IMPLEMENTED;
14627}
14628
14629
14630VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14631{
14632 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14633 return VERR_NOT_IMPLEMENTED;
14634}
14635
14636
14637#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14638/**
14639 * Executes a IRET instruction with default operand size.
14640 *
14641 * This is for PATM.
14642 *
14643 * @returns VBox status code.
14644 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14645 * @param pCtxCore The register frame.
14646 */
14647VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14648{
14649 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14650
14651 iemCtxCoreToCtx(pCtx, pCtxCore);
14652 iemInitDecoder(pVCpu);
14653 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14654 if (rcStrict == VINF_SUCCESS)
14655 iemCtxToCtxCore(pCtxCore, pCtx);
14656 else
14657 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14658 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14659 return rcStrict;
14660}
14661#endif
14662
14663
14664/**
14665 * Macro used by the IEMExec* methods to check the given instruction length.
14666 *
14667 * Will return on failure!
14668 *
14669 * @param a_cbInstr The given instruction length.
14670 * @param a_cbMin The minimum length.
14671 */
14672#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14673 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14674 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14675
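/*
 * The unsigned subtraction above folds the range check a_cbMin <= a_cbInstr <= 15
 * into a single compare.  E.g. with a_cbMin=1: cbInstr=0 wraps around and fails,
 * cbInstr=1..15 gives 0..14 and passes, cbInstr=16 gives 15 > 14 and fails.
 */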
14676
14677/**
14678 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14679 *
14680 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14681 *
14682 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14683 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14684 * @param rcStrict The status code to fiddle.
14685 */
14686DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14687{
14688 iemUninitExec(pVCpu);
14689#ifdef IN_RC
14690 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14691#else
14692 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14693#endif
14694}
14695
14696
14697/**
14698 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14699 *
14700 * This API ASSUMES that the caller has already verified that the guest code is
14701 * allowed to access the I/O port. (The I/O port is in the DX register in the
14702 * guest state.)
14703 *
14704 * @returns Strict VBox status code.
14705 * @param pVCpu The cross context virtual CPU structure.
14706 * @param cbValue The size of the I/O port access (1, 2, or 4).
14707 * @param enmAddrMode The addressing mode.
14708 * @param fRepPrefix Indicates whether a repeat prefix is used
14709 * (doesn't matter which for this instruction).
14710 * @param cbInstr The instruction length in bytes.
14711 * @param iEffSeg The effective segment register (X86_SREG_XXX index).
14712 * @param fIoChecked Whether the access to the I/O port has been
14713 * checked or not. It's typically checked in the
14714 * HM scenario.
14715 */
14716VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14717 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14718{
14719 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14720 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14721
14722 /*
14723 * State init.
14724 */
14725 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14726
14727 /*
14728 * Switch orgy for getting to the right handler.
14729 */
14730 VBOXSTRICTRC rcStrict;
14731 if (fRepPrefix)
14732 {
14733 switch (enmAddrMode)
14734 {
14735 case IEMMODE_16BIT:
14736 switch (cbValue)
14737 {
14738 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14739 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14740 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14741 default:
14742 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14743 }
14744 break;
14745
14746 case IEMMODE_32BIT:
14747 switch (cbValue)
14748 {
14749 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14750 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14751 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14752 default:
14753 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14754 }
14755 break;
14756
14757 case IEMMODE_64BIT:
14758 switch (cbValue)
14759 {
14760 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14761 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14762 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14763 default:
14764 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14765 }
14766 break;
14767
14768 default:
14769 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14770 }
14771 }
14772 else
14773 {
14774 switch (enmAddrMode)
14775 {
14776 case IEMMODE_16BIT:
14777 switch (cbValue)
14778 {
14779 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14780 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14781 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14782 default:
14783 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14784 }
14785 break;
14786
14787 case IEMMODE_32BIT:
14788 switch (cbValue)
14789 {
14790 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14791 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14792 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14793 default:
14794 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14795 }
14796 break;
14797
14798 case IEMMODE_64BIT:
14799 switch (cbValue)
14800 {
14801 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14802 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14803 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14804 default:
14805 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14806 }
14807 break;
14808
14809 default:
14810 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14811 }
14812 }
14813
14814 if (pVCpu->iem.s.cActiveMappings)
14815 iemMemRollback(pVCpu);
14816
14817 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14818}
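/*
 * Illustrative sketch: how a caller that has already decoded a REP OUTSB
 * VM-exit and checked the I/O permission bitmap might forward it to
 * IEMExecStringIoWrite().  The wrapper name and the fixed parameter choices
 * are hypothetical; the read direction (INS) is handled symmetrically via
 * IEMExecStringIoRead().
 */
#if 0 /* illustration only */
static VBOXSTRICTRC exampleForwardRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    /* DX already holds the port and the I/O access has been checked by the
       caller, hence fIoChecked=true.  Byte sized, 32-bit addressing, DS. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif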
14819
14820
14821/**
14822 * Interface for HM and EM for executing string I/O IN (read) instructions.
14823 *
14824 * This API ASSUMES that the caller has already verified that the guest code is
14825 * allowed to access the I/O port. (The I/O port is in the DX register in the
14826 * guest state.)
14827 *
14828 * @returns Strict VBox status code.
14829 * @param pVCpu The cross context virtual CPU structure.
14830 * @param cbValue The size of the I/O port access (1, 2, or 4).
14831 * @param enmAddrMode The addressing mode.
14832 * @param fRepPrefix Indicates whether a repeat prefix is used
14833 * (doesn't matter which for this instruction).
14834 * @param cbInstr The instruction length in bytes.
14835 * @param fIoChecked Whether the access to the I/O port has been
14836 * checked or not. It's typically checked in the
14837 * HM scenario.
14838 */
14839VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14840 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14841{
14842 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14843
14844 /*
14845 * State init.
14846 */
14847 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14848
14849 /*
14850 * Switch orgy for getting to the right handler.
14851 */
14852 VBOXSTRICTRC rcStrict;
14853 if (fRepPrefix)
14854 {
14855 switch (enmAddrMode)
14856 {
14857 case IEMMODE_16BIT:
14858 switch (cbValue)
14859 {
14860 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14861 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14862 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14863 default:
14864 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14865 }
14866 break;
14867
14868 case IEMMODE_32BIT:
14869 switch (cbValue)
14870 {
14871 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14872 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14873 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14874 default:
14875 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14876 }
14877 break;
14878
14879 case IEMMODE_64BIT:
14880 switch (cbValue)
14881 {
14882 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14883 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14884 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14885 default:
14886 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14887 }
14888 break;
14889
14890 default:
14891 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14892 }
14893 }
14894 else
14895 {
14896 switch (enmAddrMode)
14897 {
14898 case IEMMODE_16BIT:
14899 switch (cbValue)
14900 {
14901 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14902 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14903 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14904 default:
14905 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14906 }
14907 break;
14908
14909 case IEMMODE_32BIT:
14910 switch (cbValue)
14911 {
14912 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14913 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14914 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14915 default:
14916 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14917 }
14918 break;
14919
14920 case IEMMODE_64BIT:
14921 switch (cbValue)
14922 {
14923 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14924 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14925 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14926 default:
14927 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14928 }
14929 break;
14930
14931 default:
14932 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14933 }
14934 }
14935
14936 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
14937 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14938}
14939
14940
14941/**
14942 * Interface for raw-mode to execute an OUT instruction.
14943 *
14944 * @returns Strict VBox status code.
14945 * @param pVCpu The cross context virtual CPU structure.
14946 * @param cbInstr The instruction length in bytes.
14947 * @param   u16Port         The port to write to.
14948 * @param cbReg The register size.
14949 *
14950 * @remarks In ring-0 not all of the state needs to be synced in.
14951 */
14952VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14953{
14954 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14955 Assert(cbReg <= 4 && cbReg != 3);
14956
14957 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14958 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14959 Assert(!pVCpu->iem.s.cActiveMappings);
14960 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14961}
14962
14963
14964/**
14965 * Interface for raw-mode to execute an IN instruction.
14966 *
14967 * @returns Strict VBox status code.
14968 * @param pVCpu The cross context virtual CPU structure.
14969 * @param cbInstr The instruction length in bytes.
14970 * @param u16Port The port to read.
14971 * @param cbReg The register size.
14972 */
14973VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14974{
14975 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14976 Assert(cbReg <= 4 && cbReg != 3);
14977
14978 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14979 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14980 Assert(!pVCpu->iem.s.cActiveMappings);
14981 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14982}
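/*
 * Illustrative sketch: a hypothetical trap handler that has decoded a plain
 * OUT or IN instruction (port and operand size already extracted) could
 * simply pick between the two interfaces above.
 */
#if 0 /* illustration only */
static VBOXSTRICTRC exampleHandlePortIo(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port,
                                        uint8_t cbReg, bool fWrite)
{
    /* cbReg must be 1, 2 or 4; both interfaces assert this. */
    return fWrite
         ? IEMExecDecodedOut(pVCpu, cbInstr, u16Port, cbReg)
         : IEMExecDecodedIn( pVCpu, cbInstr, u16Port, cbReg);
}
#endif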
14983
14984
14985/**
14986 * Interface for HM and EM to write to a CRx register.
14987 *
14988 * @returns Strict VBox status code.
14989 * @param pVCpu The cross context virtual CPU structure.
14990 * @param cbInstr The instruction length in bytes.
14991 * @param iCrReg The control register number (destination).
14992 * @param iGReg The general purpose register number (source).
14993 *
14994 * @remarks In ring-0 not all of the state needs to be synced in.
14995 */
14996VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14997{
14998 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14999 Assert(iCrReg < 16);
15000 Assert(iGReg < 16);
15001
15002 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15003 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15004 Assert(!pVCpu->iem.s.cActiveMappings);
15005 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15006}
15007
15008
15009/**
15010 * Interface for HM and EM to read from a CRx register.
15011 *
15012 * @returns Strict VBox status code.
15013 * @param pVCpu The cross context virtual CPU structure.
15014 * @param cbInstr The instruction length in bytes.
15015 * @param iGReg The general purpose register number (destination).
15016 * @param iCrReg The control register number (source).
15017 *
15018 * @remarks In ring-0 not all of the state needs to be synced in.
15019 */
15020VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15021{
15022 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15023 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15024 | CPUMCTX_EXTRN_APIC_TPR);
15025 Assert(iCrReg < 16);
15026 Assert(iGReg < 16);
15027
15028 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15029 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15030 Assert(!pVCpu->iem.s.cActiveMappings);
15031 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15032}
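/*
 * Illustrative sketch: a hypothetical MOV CRx intercept handler dispatching to
 * the two interfaces above.  Note the argument order: the destination register
 * always comes first.
 */
#if 0 /* illustration only */
static VBOXSTRICTRC exampleHandleMovCRx(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg,
                                        uint8_t iGReg, bool fWrite)
{
    return fWrite
         ? IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg)  /* MOV CRx,GReg */
         : IEMExecDecodedMovCRxRead( pVCpu, cbInstr, iGReg, iCrReg); /* MOV GReg,CRx */
}
#endif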
15033
15034
15035/**
15036 * Interface for HM and EM to clear the CR0[TS] bit.
15037 *
15038 * @returns Strict VBox status code.
15039 * @param pVCpu The cross context virtual CPU structure.
15040 * @param cbInstr The instruction length in bytes.
15041 *
15042 * @remarks In ring-0 not all of the state needs to be synced in.
15043 */
15044VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15045{
15046 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15047
15048 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15049 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15050 Assert(!pVCpu->iem.s.cActiveMappings);
15051 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15052}
15053
15054
15055/**
15056 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15057 *
15058 * @returns Strict VBox status code.
15059 * @param pVCpu The cross context virtual CPU structure.
15060 * @param cbInstr The instruction length in bytes.
15061 * @param uValue The value to load into CR0.
15062 *
15063 * @remarks In ring-0 not all of the state needs to be synced in.
15064 */
15065VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
15066{
15067 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15068
15069 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15070 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
15071 Assert(!pVCpu->iem.s.cActiveMappings);
15072 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15073}
15074
15075
15076/**
15077 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15078 *
15079 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15080 *
15081 * @returns Strict VBox status code.
15082 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15083 * @param cbInstr The instruction length in bytes.
15084 * @remarks In ring-0 not all of the state needs to be synced in.
15085 * @thread EMT(pVCpu)
15086 */
15087VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15088{
15089 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15090
15091 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15092 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15093 Assert(!pVCpu->iem.s.cActiveMappings);
15094 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15095}
15096
15097
15098/**
15099 * Interface for HM and EM to emulate the WBINVD instruction.
15100 *
15101 * @returns Strict VBox status code.
15102 * @param pVCpu The cross context virtual CPU structure.
15103 * @param cbInstr The instruction length in bytes.
15104 *
15105 * @remarks In ring-0 not all of the state needs to be synced in.
15106 */
15107VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15108{
15109 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15110
15111 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15112 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15113 Assert(!pVCpu->iem.s.cActiveMappings);
15114 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15115}
15116
15117
15118/**
15119 * Interface for HM and EM to emulate the INVD instruction.
15120 *
15121 * @returns Strict VBox status code.
15122 * @param pVCpu The cross context virtual CPU structure.
15123 * @param cbInstr The instruction length in bytes.
15124 *
15125 * @remarks In ring-0 not all of the state needs to be synced in.
15126 */
15127VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15128{
15129 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15130
15131 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15132 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15133 Assert(!pVCpu->iem.s.cActiveMappings);
15134 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15135}
15136
15137
15138/**
15139 * Interface for HM and EM to emulate the INVLPG instruction.
15140 *
15141 * @returns Strict VBox status code.
15142 * @retval  VINF_PGM_SYNC_CR3 when a guest page table (CR3) resync is required.
15143 *
15144 * @param pVCpu The cross context virtual CPU structure.
15145 * @param cbInstr The instruction length in bytes.
15146 * @param GCPtrPage The effective address of the page to invalidate.
15147 *
15148 * @remarks In ring-0 not all of the state needs to be synced in.
15149 */
15150VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15151{
15152 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15153
15154 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15155 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15156 Assert(!pVCpu->iem.s.cActiveMappings);
15157 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15158}
15159
15160
15161/**
15162 * Interface for HM and EM to emulate the CPUID instruction.
15163 *
15164 * @returns Strict VBox status code.
15165 *
15166 * @param pVCpu The cross context virtual CPU structure.
15167 * @param cbInstr The instruction length in bytes.
15168 *
15169 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
15170 */
15171VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15172{
15173 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15174 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15175
15176 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15177 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15178 Assert(!pVCpu->iem.s.cActiveMappings);
15179 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15180}
15181
15182
15183/**
15184 * Interface for HM and EM to emulate the RDPMC instruction.
15185 *
15186 * @returns Strict VBox status code.
15187 *
15188 * @param pVCpu The cross context virtual CPU structure.
15189 * @param cbInstr The instruction length in bytes.
15190 *
15191 * @remarks Not all of the state needs to be synced in.
15192 */
15193VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15194{
15195 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15196 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15197
15198 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15199 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15200 Assert(!pVCpu->iem.s.cActiveMappings);
15201 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15202}
15203
15204
15205/**
15206 * Interface for HM and EM to emulate the RDTSC instruction.
15207 *
15208 * @returns Strict VBox status code.
15209 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15210 *
15211 * @param pVCpu The cross context virtual CPU structure.
15212 * @param cbInstr The instruction length in bytes.
15213 *
15214 * @remarks Not all of the state needs to be synced in.
15215 */
15216VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15217{
15218 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15219 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15220
15221 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15222 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15223 Assert(!pVCpu->iem.s.cActiveMappings);
15224 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15225}
15226
15227
15228/**
15229 * Interface for HM and EM to emulate the RDTSCP instruction.
15230 *
15231 * @returns Strict VBox status code.
15232 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15233 *
15234 * @param pVCpu The cross context virtual CPU structure.
15235 * @param cbInstr The instruction length in bytes.
15236 *
15237 * @remarks Not all of the state needs to be synced in.  Recommended to include
15238 *          CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15239 */
15240VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15241{
15242 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15243 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15244
15245 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15246 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15247 Assert(!pVCpu->iem.s.cActiveMappings);
15248 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15249}
15250
15251
15252/**
15253 * Interface for HM and EM to emulate the RDMSR instruction.
15254 *
15255 * @returns Strict VBox status code.
15256 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15257 *
15258 * @param pVCpu The cross context virtual CPU structure.
15259 * @param cbInstr The instruction length in bytes.
15260 *
15261 * @remarks Not all of the state needs to be synced in. Requires RCX and
15262 * (currently) all MSRs.
15263 */
15264VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15265{
15266 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15267 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15268
15269 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15270 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15271 Assert(!pVCpu->iem.s.cActiveMappings);
15272 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15273}
15274
15275
15276/**
15277 * Interface for HM and EM to emulate the WRMSR instruction.
15278 *
15279 * @returns Strict VBox status code.
15280 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15281 *
15282 * @param pVCpu The cross context virtual CPU structure.
15283 * @param cbInstr The instruction length in bytes.
15284 *
15285 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15286 * and (currently) all MSRs.
15287 */
15288VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15289{
15290 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15291 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15292 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15293
15294 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15295 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15296 Assert(!pVCpu->iem.s.cActiveMappings);
15297 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15298}
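/*
 * Illustrative sketch: as the IEM_CTX_ASSERT statements above show, RDMSR needs
 * RCX plus the MSRs and WRMSR additionally needs RAX and RDX in the guest
 * context.  Once that state is present, a hypothetical MSR intercept handler
 * reduces to picking the right wrapper.
 */
#if 0 /* illustration only */
static VBOXSTRICTRC exampleHandleMsrAccess(PVMCPU pVCpu, uint8_t cbInstr, bool fWrite)
{
    /* Any non-VINF_SUCCESS strict status (e.g. a raised #GP) is simply passed up. */
    return fWrite
         ? IEMExecDecodedWrmsr(pVCpu, cbInstr)
         : IEMExecDecodedRdmsr(pVCpu, cbInstr);
}
#endif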
15299
15300
15301/**
15302 * Interface for HM and EM to emulate the MONITOR instruction.
15303 *
15304 * @returns Strict VBox status code.
15305 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15306 *
15307 * @param pVCpu The cross context virtual CPU structure.
15308 * @param cbInstr The instruction length in bytes.
15309 *
15310 * @remarks Not all of the state needs to be synced in.
15311 * @remarks ASSUMES the default DS segment and that no segment override
15312 *          prefixes are used.
15313 */
15314VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15315{
15316 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15317 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15318
15319 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15320 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15321 Assert(!pVCpu->iem.s.cActiveMappings);
15322 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15323}
15324
15325
15326/**
15327 * Interface for HM and EM to emulate the MWAIT instruction.
15328 *
15329 * @returns Strict VBox status code.
15330 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15331 *
15332 * @param pVCpu The cross context virtual CPU structure.
15333 * @param cbInstr The instruction length in bytes.
15334 *
15335 * @remarks Not all of the state needs to be synced in.
15336 */
15337VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15338{
15339 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15340
15341 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15342 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15343 Assert(!pVCpu->iem.s.cActiveMappings);
15344 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15345}
15346
15347
15348/**
15349 * Interface for HM and EM to emulate the HLT instruction.
15350 *
15351 * @returns Strict VBox status code.
15352 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15353 *
15354 * @param pVCpu The cross context virtual CPU structure.
15355 * @param cbInstr The instruction length in bytes.
15356 *
15357 * @remarks Not all of the state needs to be synced in.
15358 */
15359VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15360{
15361 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15362
15363 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15364 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15365 Assert(!pVCpu->iem.s.cActiveMappings);
15366 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15367}
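/*
 * Illustrative sketch: all the IEMExecDecoded* wrappers above share the same
 * shape - iemInitExec, a single IEM_CIMPL_CALL_*, then
 * iemUninitExecAndFiddleStatusAndMaybeReenter - so an exit dispatcher only has
 * to pick the matching entry point.  The dispatcher and its exit-kind values
 * are hypothetical.
 */
#if 0 /* illustration only */
static VBOXSTRICTRC exampleDispatchSimpleExit(PVMCPU pVCpu, uint8_t cbInstr, unsigned uExitKind)
{
    switch (uExitKind)
    {
        case 0:  return IEMExecDecodedCpuid(pVCpu, cbInstr);
        case 1:  return IEMExecDecodedRdtsc(pVCpu, cbInstr);
        case 2:  return IEMExecDecodedHlt(pVCpu, cbInstr);
        default: return VERR_NOT_SUPPORTED;
    }
}
#endif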
15368
15369
15370/**
15371 * Checks if IEM is in the process of delivering an event (interrupt or
15372 * exception).
15373 *
15374 * @returns true if we're in the process of raising an interrupt or exception,
15375 * false otherwise.
15376 * @param pVCpu The cross context virtual CPU structure.
15377 * @param puVector Where to store the vector associated with the
15378 * currently delivered event, optional.
15379 * @param   pfFlags         Where to store the event delivery flags (see
15380 * IEM_XCPT_FLAGS_XXX), optional.
15381 * @param puErr Where to store the error code associated with the
15382 * event, optional.
15383 * @param puCr2 Where to store the CR2 associated with the event,
15384 * optional.
15385 * @remarks The caller should check the flags to determine if the error code and
15386 * CR2 are valid for the event.
15387 */
15388VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15389{
15390 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15391 if (fRaisingXcpt)
15392 {
15393 if (puVector)
15394 *puVector = pVCpu->iem.s.uCurXcpt;
15395 if (pfFlags)
15396 *pfFlags = pVCpu->iem.s.fCurXcpt;
15397 if (puErr)
15398 *puErr = pVCpu->iem.s.uCurXcptErr;
15399 if (puCr2)
15400 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15401 }
15402 return fRaisingXcpt;
15403}
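/*
 * Illustrative sketch: querying the event currently being delivered.  The flag
 * names IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2 are assumed members of the
 * IEM_XCPT_FLAGS_XXX set mentioned above; only trust the error code and CR2
 * when the corresponding flag is set.
 */
#if 0 /* illustration only */
static void exampleLogCurrentXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Delivering vector %#x fFlags=%#x%s%s\n", uVector, fFlags,
             (fFlags & IEM_XCPT_FLAGS_ERR) ? " +errcd" : "",
             (fFlags & IEM_XCPT_FLAGS_CR2) ? " +cr2"   : ""));
}
#endif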
15404
15405#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15406
15407/**
15408 * Interface for HM and EM to emulate the CLGI instruction.
15409 *
15410 * @returns Strict VBox status code.
15411 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15412 * @param cbInstr The instruction length in bytes.
15413 * @thread EMT(pVCpu)
15414 */
15415VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15416{
15417 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15418
15419 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15420 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15421 Assert(!pVCpu->iem.s.cActiveMappings);
15422 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15423}
15424
15425
15426/**
15427 * Interface for HM and EM to emulate the STGI instruction.
15428 *
15429 * @returns Strict VBox status code.
15430 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15431 * @param cbInstr The instruction length in bytes.
15432 * @thread EMT(pVCpu)
15433 */
15434VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15435{
15436 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15437
15438 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15439 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15440 Assert(!pVCpu->iem.s.cActiveMappings);
15441 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15442}
15443
15444
15445/**
15446 * Interface for HM and EM to emulate the VMLOAD instruction.
15447 *
15448 * @returns Strict VBox status code.
15449 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15450 * @param cbInstr The instruction length in bytes.
15451 * @thread EMT(pVCpu)
15452 */
15453VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15454{
15455 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15456
15457 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15458 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15459 Assert(!pVCpu->iem.s.cActiveMappings);
15460 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15461}
15462
15463
15464/**
15465 * Interface for HM and EM to emulate the VMSAVE instruction.
15466 *
15467 * @returns Strict VBox status code.
15468 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15469 * @param cbInstr The instruction length in bytes.
15470 * @thread EMT(pVCpu)
15471 */
15472VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15473{
15474 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15475
15476 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15477 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15478 Assert(!pVCpu->iem.s.cActiveMappings);
15479 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15480}
15481
15482
15483/**
15484 * Interface for HM and EM to emulate the INVLPGA instruction.
15485 *
15486 * @returns Strict VBox status code.
15487 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15488 * @param cbInstr The instruction length in bytes.
15489 * @thread EMT(pVCpu)
15490 */
15491VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15492{
15493 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15494
15495 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15496 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15497 Assert(!pVCpu->iem.s.cActiveMappings);
15498 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15499}
15500
15501
15502/**
15503 * Interface for HM and EM to emulate the VMRUN instruction.
15504 *
15505 * @returns Strict VBox status code.
15506 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15507 * @param cbInstr The instruction length in bytes.
15508 * @thread EMT(pVCpu)
15509 */
15510VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15511{
15512 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15513 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15514
15515 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15516 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15517 Assert(!pVCpu->iem.s.cActiveMappings);
15518 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15519}
15520
15521
15522/**
15523 * Interface for HM and EM to emulate \#VMEXIT.
15524 *
15525 * @returns Strict VBox status code.
15526 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15527 * @param uExitCode The exit code.
15528 * @param uExitInfo1 The exit info. 1 field.
15529 * @param uExitInfo2 The exit info. 2 field.
15530 * @thread EMT(pVCpu)
15531 */
15532VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15533{
15534 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15535 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15536 if (pVCpu->iem.s.cActiveMappings)
15537 iemMemRollback(pVCpu);
15538 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15539}
15540
15541#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15542
15543#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15544
15545/**
15546 * Interface for HM and EM to emulate the VMPTRLD instruction.
15547 *
15548 * @returns Strict VBox status code.
15549 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15550 * @param cbInstr The instruction length in bytes.
15551 * @param   GCPtrVmcs       The linear address of the VMCS pointer.
15552 * @param uExitInstrInfo The VM-exit instruction information field.
15553 * @param GCPtrDisp The displacement field for @a GCPtrVmcs if any.
15554 * @thread EMT(pVCpu)
15555 */
15556VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmcs, uint32_t uExitInstrInfo,
15557 RTGCPTR GCPtrDisp)
15558{
15559 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15560 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15561
15562 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15563 PCVMXEXITINSTRINFO pExitInstrInfo = (PCVMXEXITINSTRINFO)&uExitInstrInfo;
15564 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, GCPtrVmcs, pExitInstrInfo, GCPtrDisp);
15565 if (pVCpu->iem.s.cActiveMappings)
15566 iemMemRollback(pVCpu);
15567 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15568}
15569
15570
15571/**
15572 * Interface for HM and EM to emulate the VMPTRST instruction.
15573 *
15574 * @returns Strict VBox status code.
15575 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15576 * @param cbInstr The instruction length in bytes.
15577 * @param   GCPtrVmcs       The linear address of where to store the VMCS pointer.
15578 * @param uExitInstrInfo The VM-exit instruction information field.
15579 * @param GCPtrDisp The displacement field for @a GCPtrVmcs if any.
15580 * @thread EMT(pVCpu)
15581 */
15582VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmcs, uint32_t uExitInstrInfo,
15583 RTGCPTR GCPtrDisp)
15584{
15585 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15586 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15587
15588 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15589 PCVMXEXITINSTRINFO pExitInstrInfo = (PCVMXEXITINSTRINFO)&uExitInstrInfo;
15590 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, GCPtrVmcs, pExitInstrInfo, GCPtrDisp);
15591 if (pVCpu->iem.s.cActiveMappings)
15592 iemMemRollback(pVCpu);
15593 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15594}
15595
15596
15597/**
15598 * Interface for HM and EM to emulate the VMCLEAR instruction.
15599 *
15600 * @returns Strict VBox status code.
15601 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15602 * @param cbInstr The instruction length in bytes.
15603 * @param   GCPtrVmcs       The linear address of the VMCS pointer.
15604 * @param uExitInstrInfo The VM-exit instruction information field.
15605 * @param GCPtrDisp The displacement field for @a GCPtrVmcs if any.
15606 * @thread EMT(pVCpu)
15607 */
15608VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmcs, uint32_t uExitInstrInfo,
15609 RTGCPTR GCPtrDisp)
15610{
15611 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15612 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15613
15614 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15615 PCVMXEXITINSTRINFO pExitInstrInfo = (PCVMXEXITINSTRINFO)&uExitInstrInfo;
15616 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, GCPtrVmcs, pExitInstrInfo, GCPtrDisp);
15617 if (pVCpu->iem.s.cActiveMappings)
15618 iemMemRollback(pVCpu);
15619 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15620}
15621
15622
15623/**
15624 * Interface for HM and EM to emulate the VMXON instruction.
15625 *
15626 * @returns Strict VBox status code.
15627 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15628 * @param cbInstr The instruction length in bytes.
15629 * @param GCPtrVmxon The linear address of the VMXON pointer.
15630 * @param uExitInstrInfo The VM-exit instruction information field.
15631 * @param GCPtrDisp The displacement field for @a GCPtrVmxon if any.
15632 * @thread EMT(pVCpu)
15633 */
15634VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmxon, uint32_t uExitInstrInfo,
15635 RTGCPTR GCPtrDisp)
15636{
15637 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15638 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15639
15640 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15641 PCVMXEXITINSTRINFO pExitInstrInfo = (PCVMXEXITINSTRINFO)&uExitInstrInfo;
15642 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, GCPtrVmxon, pExitInstrInfo, GCPtrDisp);
15643 if (pVCpu->iem.s.cActiveMappings)
15644 iemMemRollback(pVCpu);
15645 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15646}
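/*
 * Illustrative sketch: the VMPTRLD/VMPTRST/VMCLEAR/VMXON interfaces above all
 * take the VM-exit instruction information field and the displacement straight
 * from the VM-exit, so a hypothetical intercept handler only needs to pass
 * them through.
 */
#if 0 /* illustration only */
static VBOXSTRICTRC exampleForwardVmxon(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmxon,
                                        uint32_t uExitInstrInfo, RTGCPTR GCPtrDisp)
{
    return IEMExecDecodedVmxon(pVCpu, cbInstr, GCPtrVmxon, uExitInstrInfo, GCPtrDisp);
}
#endif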
15647
15648
15649/**
15650 * Interface for HM and EM to emulate the VMXOFF instruction.
15651 *
15652 * @returns Strict VBox status code.
15653 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15654 * @param cbInstr The instruction length in bytes.
15655 * @thread EMT(pVCpu)
15656 */
15657VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
15658{
15659 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15660
15661 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15662 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
15663 Assert(!pVCpu->iem.s.cActiveMappings);
15664 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15665}
15666
15667#endif
15668
15669#ifdef IN_RING3
15670
15671/**
15672 * Handles the unlikely and probably fatal merge cases.
15673 *
15674 * @returns Merged status code.
15675 * @param rcStrict Current EM status code.
15676 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15677 * with @a rcStrict.
15678 * @param iMemMap The memory mapping index. For error reporting only.
15679 * @param pVCpu The cross context virtual CPU structure of the calling
15680 * thread, for error reporting only.
15681 */
15682DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15683 unsigned iMemMap, PVMCPU pVCpu)
15684{
15685 if (RT_FAILURE_NP(rcStrict))
15686 return rcStrict;
15687
15688 if (RT_FAILURE_NP(rcStrictCommit))
15689 return rcStrictCommit;
15690
15691 if (rcStrict == rcStrictCommit)
15692 return rcStrictCommit;
15693
15694 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15695 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15696 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15697 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15698 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15699 return VERR_IOM_FF_STATUS_IPE;
15700}
15701
15702
15703/**
15704 * Helper for IOMR3ProcessForceFlag.
15705 *
15706 * @returns Merged status code.
15707 * @param rcStrict Current EM status code.
15708 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15709 * with @a rcStrict.
15710 * @param iMemMap The memory mapping index. For error reporting only.
15711 * @param pVCpu The cross context virtual CPU structure of the calling
15712 * thread, for error reporting only.
15713 */
15714DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15715{
15716 /* Simple. */
15717 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15718 return rcStrictCommit;
15719
15720 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15721 return rcStrict;
15722
15723 /* EM scheduling status codes. */
15724 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15725 && rcStrict <= VINF_EM_LAST))
15726 {
15727 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15728 && rcStrictCommit <= VINF_EM_LAST))
15729 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15730 }
15731
15732 /* Unlikely */
15733 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15734}
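/*
 * Note: the merge rules above boil down to the following.  VINF_SUCCESS or
 * VINF_EM_RAW_TO_R3 on the EM side defers to the commit status (e.g. merging
 * rcStrict=VINF_EM_RAW_TO_R3 with any commit status returns the commit
 * status); a VINF_SUCCESS commit defers to the EM status; two EM scheduling
 * codes keep the numerically lower (higher priority) one; everything else is
 * handed to iemR3MergeStatusSlow, which passes through failures and identical
 * codes and flags any remaining mix as VERR_IOM_FF_STATUS_IPE.
 */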
15735
15736
15737/**
15738 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15739 *
15740 * @returns Merge between @a rcStrict and what the commit operation returned.
15741 * @param pVM The cross context VM structure.
15742 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15743 * @param rcStrict The status code returned by ring-0 or raw-mode.
15744 */
15745VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15746{
15747 /*
15748 * Reset the pending commit.
15749 */
15750 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15751 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15752 ("%#x %#x %#x\n",
15753 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15754 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15755
15756 /*
15757 * Commit the pending bounce buffers (usually just one).
15758 */
15759 unsigned cBufs = 0;
15760 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15761 while (iMemMap-- > 0)
15762 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15763 {
15764 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15765 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15766 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15767
15768 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15769 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15770 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15771
15772 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15773 {
15774 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15775 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15776 pbBuf,
15777 cbFirst,
15778 PGMACCESSORIGIN_IEM);
15779 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
15780 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
15781 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
15782 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
15783 }
15784
15785 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
15786 {
15787 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
15788 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
15789 pbBuf + cbFirst,
15790 cbSecond,
15791 PGMACCESSORIGIN_IEM);
15792 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
15793 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
15794 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
15795 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
15796 }
15797 cBufs++;
15798 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
15799 }
15800
15801 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
15802 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
15803 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15804 pVCpu->iem.s.cActiveMappings = 0;
15805 return rcStrict;
15806}
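/*
 * Illustrative sketch: how ring-3 force-flag handling code might invoke the
 * commit function above; the wrapper name and the way rcStrict was obtained
 * are hypothetical.
 */
#if 0 /* illustration only */
static VBOXSTRICTRC exampleHandlePendingIemWrites(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    /* Only call this when IEM has flagged pending bounce buffer commits. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif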
15807
15808#endif /* IN_RING3 */
15809