VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@73992

Last change on this file since 73992 was 73983, checked in by vboxsync, 6 years ago

VMM/IEM, HM: Nested VMX: bugref:9180 Implement VMREAD; start using decoded IEM APIs for
VMXON, VMREAD and VMWRITE in the VMX R0 code.

1/* $Id: IEMAll.cpp 73983 2018-08-31 08:17:31Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered, however this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
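/*
 * Illustrative sketch (hypothetical call sites, assuming the standard VBox/log.h
 * macros and made-up format arguments): how the levels above map onto typical
 * logging statements in this log group.
 *
 *      LogFlow(("IEMExecOne: cs:rip=%04x:%RGv\n", uCs, GCPtrRip));      // Flow   : enter/exit state info
 *      Log(("iemRaiseXcptOrInt: raising #GP(0)\n"));                    // Level 1: exceptions and such
 *      Log4(("decode - %04x:%RGv: xor eax, eax\n", uCs, GCPtrRip));     // Level 4: decoded mnemonics w/ EIP
 *      Log8(("IEM WR %RGv LB %u\n", GCPtrMem, cbMem));                  // Level 8: memory writes
 */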
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/assert.h>
121#include <iprt/string.h>
122#include <iprt/x86.h>
123
124
125/*********************************************************************************************************************************
126* Structures and Typedefs *
127*********************************************************************************************************************************/
128/** @typedef PFNIEMOP
129 * Pointer to an opcode decoder function.
130 */
131
132/** @def FNIEMOP_DEF
133 * Define an opcode decoder function.
134 *
135 * We're using macros for this so that adding and removing parameters as well as
136 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
137 *
138 * @param a_Name The function name.
139 */
140
141/** @typedef PFNIEMOPRM
142 * Pointer to an opcode decoder function with RM byte.
143 */
144
145/** @def FNIEMOPRM_DEF
146 * Define an opcode decoder function with RM byte.
147 *
148 * We're using macros for this so that adding and removing parameters as well as
149 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
150 *
151 * @param a_Name The function name.
152 */
153
154#if defined(__GNUC__) && defined(RT_ARCH_X86)
155typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
157# define FNIEMOP_DEF(a_Name) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
159# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
161# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
163
164#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
165typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
167# define FNIEMOP_DEF(a_Name) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
171# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
173
174#elif defined(__GNUC__)
175typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
176typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
177# define FNIEMOP_DEF(a_Name) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
179# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
181# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
183
184#else
185typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
186typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
187# define FNIEMOP_DEF(a_Name) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
191# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
193
194#endif
195#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
196
197
198/**
199 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
200 */
201typedef union IEMSELDESC
202{
203 /** The legacy view. */
204 X86DESC Legacy;
205 /** The long mode view. */
206 X86DESC64 Long;
207} IEMSELDESC;
208/** Pointer to a selector descriptor table entry. */
209typedef IEMSELDESC *PIEMSELDESC;
210
211/**
212 * CPU exception classes.
213 */
214typedef enum IEMXCPTCLASS
215{
216 IEMXCPTCLASS_BENIGN,
217 IEMXCPTCLASS_CONTRIBUTORY,
218 IEMXCPTCLASS_PAGE_FAULT,
219 IEMXCPTCLASS_DOUBLE_FAULT
220} IEMXCPTCLASS;
221
222
223/*********************************************************************************************************************************
224* Defined Constants And Macros *
225*********************************************************************************************************************************/
226/** @def IEM_WITH_SETJMP
227 * Enables alternative status code handling using setjmps.
228 *
229 * This adds a bit of expense via the setjmp() call since it saves all the
230 * non-volatile registers. However, it eliminates return code checks and allows
231 * for more optimal return value passing (return regs instead of stack buffer).
232 */
233#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
234# define IEM_WITH_SETJMP
235#endif
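/*
 * Illustrative sketch of what IEM_WITH_SETJMP changes at a typical memory-fetch
 * call site. The Jmp-suffixed helper is hypothetical here and only mirrors the
 * naming convention of the iemRaiseXxxJmp prototypes further down.
 *
 *      // Status-code mode: every helper returns a VBOXSTRICTRC that must be checked.
 *      uint32_t     u32Value;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, iSegReg, GCPtrMem);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 *      // Setjmp mode: the helper returns the value directly and longjmps back to
 *      // the dispatcher on #PF/#GP, eliminating the per-call status check.
 *      uint32_t u32Value = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
 */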
236
237/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
238 * due to GCC lacking knowledge about the value range of a switch. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
240
241/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
242#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
243
244/**
245 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
246 * occasion.
247 */
248#ifdef LOG_ENABLED
249# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
250 do { \
251 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
252 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
253 } while (0)
254#else
255# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
257#endif
258
259/**
260 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
261 * occasion using the supplied logger statement.
262 *
263 * @param a_LoggerArgs What to log on failure.
264 */
265#ifdef LOG_ENABLED
266# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
267 do { \
268 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
269 /*LogFunc(a_LoggerArgs);*/ \
270 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
271 } while (0)
272#else
273# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
275#endif
276
277/**
278 * Call an opcode decoder function.
279 *
280 * We're using macros for this so that adding and removing parameters can be
281 * done as we please. See FNIEMOP_DEF.
282 */
283#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
284
285/**
286 * Call a common opcode decoder function taking one extra argument.
287 *
288 * We're using macros for this so that adding and removing parameters can be
289 * done as we please. See FNIEMOP_DEF_1.
290 */
291#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
292
293/**
294 * Call a common opcode decoder function taking two extra arguments.
295 *
296 * We're using macros for this so that adding and removing parameters can be
297 * done as we please. See FNIEMOP_DEF_2.
298 */
299#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
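/*
 * Illustrative sketch (hypothetical opcode handler, not from this file): how the
 * FNIEMOP_DEF and FNIEMOP_CALL macro pairs fit together, so the implicit pVCpu
 * parameter can be changed in a single place.
 *
 *      FNIEMOP_DEF(iemOp_ExampleNop)   // expands to: IEM_STATIC VBOXSTRICTRC iemOp_ExampleNop(PVMCPU pVCpu)
 *      {
 *          // A real decoder would fetch further opcode bytes and dispatch here.
 *          return VINF_SUCCESS;
 *      }
 *
 *      // ... and from a dispatch table or another decoder function:
 *      //     return FNIEMOP_CALL(iemOp_ExampleNop);   // expands to: iemOp_ExampleNop(pVCpu)
 */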
300
301/**
302 * Check if we're currently executing in real or virtual 8086 mode.
303 *
304 * @returns @c true if it is, @c false if not.
305 * @param a_pVCpu The IEM state of the current CPU.
306 */
307#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
308
309/**
310 * Check if we're currently executing in virtual 8086 mode.
311 *
312 * @returns @c true if it is, @c false if not.
313 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
314 */
315#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
316
317/**
318 * Check if we're currently executing in long mode.
319 *
320 * @returns @c true if it is, @c false if not.
321 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
322 */
323#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
324
325/**
326 * Check if we're currently executing in a 64-bit code segment.
327 *
328 * @returns @c true if it is, @c false if not.
329 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
330 */
331#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
332
333/**
334 * Check if we're currently executing in real mode.
335 *
336 * @returns @c true if it is, @c false if not.
337 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
338 */
339#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
340
341/**
342 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
343 * @returns PCCPUMFEATURES
344 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
345 */
346#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
347
348/**
349 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
350 * @returns PCCPUMFEATURES
351 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
352 */
353#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
354
355/**
356 * Evaluates to true if we're presenting an Intel CPU to the guest.
357 */
358#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
359
360/**
361 * Evaluates to true if we're presenting an AMD CPU to the guest.
362 */
363#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
364
365/**
366 * Check if the address is canonical.
367 */
368#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
369
370/**
371 * Gets the effective VEX.VVVV value.
372 *
373 * The 4th bit is ignored when not in 64-bit code.
374 * @returns effective V-register value.
375 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
376 */
377#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
378 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
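/*
 * Worked example derived from the macro above: with uVex3rdReg = 0xE the
 * effective VVVV is 14 in 64-bit code, but 0xE & 7 = 6 everywhere else, i.e.
 * the top bit of the encoded register is simply dropped when only eight
 * registers are addressable.
 */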
379
380/** @def IEM_USE_UNALIGNED_DATA_ACCESS
381 * Use unaligned accesses instead of elaborate byte assembly. */
382#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
383# define IEM_USE_UNALIGNED_DATA_ACCESS
384#endif
385
386#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
387/**
388 * Check the common VMX instruction preconditions.
389 */
390#define IEM_VMX_INSTR_COMMON_CHECKS(a_pVCpu, a_szInstr, a_InsDiagPrefix) \
391 do { \
392 if (!IEM_IS_VMX_ENABLED(a_pVCpu)) \
393 { \
394 Log((a_szInstr ": CR4.VMXE not enabled -> #UD\n")); \
395 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = a_InsDiagPrefix##_Vmxe; \
396 return iemRaiseUndefinedOpcode(a_pVCpu); \
397 } \
398 if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
399 { \
400 Log((a_szInstr ": Real or v8086 mode -> #UD\n")); \
401 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = a_InsDiagPrefix##_RealOrV86Mode; \
402 return iemRaiseUndefinedOpcode(a_pVCpu); \
403 } \
404 if (IEM_IS_LONG_MODE(a_pVCpu) && !IEM_IS_64BIT_CODE(a_pVCpu)) \
405 { \
406 Log((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
407 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = a_InsDiagPrefix##_LongModeCS; \
408 return iemRaiseUndefinedOpcode(a_pVCpu); \
409 } \
410 } while (0)
411
412/**
413 * Check if VMX is enabled.
414 */
415# define IEM_IS_VMX_ENABLED(a_pVCpu) (CPUMIsGuestVmxEnabled(IEM_GET_CTX(a_pVCpu)))
416
417/**
418 * Check if the guest has entered VMX root operation.
419 */
420#define IEM_IS_VMX_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
421
422/**
423 * Check if the guest has entered VMX non-root operation.
424 */
425#define IEM_IS_VMX_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
426
427#else
428# define IEM_VMX_INSTR_COMMON_CHECKS(a_pVCpu, a_szInstr, a_InsDiagPrefix) do { } while (0)
429# define IEM_IS_VMX_ENABLED(a_pVCpu) (false)
430# define IEM_IS_VMX_ROOT_MODE(a_pVCpu) (false)
431# define IEM_IS_VMX_NON_ROOT_MODE(a_pVCpu) (false)
432
433#endif
434
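/*
 * Illustrative sketch (hypothetical instruction body; the diagnostic enum prefix
 * is made up for the example): how the common VMX checks above are intended to
 * sit at the top of a VMX instruction implementation.
 *
 *      IEM_VMX_INSTR_COMMON_CHECKS(pVCpu, "vmxoff", kVmxVInstrDiag_Vmxoff);
 *      if (!IEM_IS_VMX_ROOT_MODE(pVCpu))       // e.g. VMXOFF outside root operation -> #UD
 *          return iemRaiseUndefinedOpcode(pVCpu);
 *      // ... instruction specific work follows ...
 */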
435#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
436/**
437 * Check the common SVM instruction preconditions.
438 */
439# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
440 do { \
441 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
442 { \
443 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
444 return iemRaiseUndefinedOpcode(a_pVCpu); \
445 } \
446 if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
447 { \
448 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
449 return iemRaiseUndefinedOpcode(a_pVCpu); \
450 } \
451 if ((a_pVCpu)->iem.s.uCpl != 0) \
452 { \
453 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
454 return iemRaiseGeneralProtectionFault0(a_pVCpu); \
455 } \
456 } while (0)
457
458/**
459 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
460 */
461# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
462 do { \
463 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
464 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
465 } while (0)
466
467/**
468 * Check if SVM is enabled.
469 */
470# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
471
472/**
473 * Check if an SVM control/instruction intercept is set.
474 */
475# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
476
477/**
478 * Check if an SVM read CRx intercept is set.
479 */
480# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
481
482/**
483 * Check if an SVM write CRx intercept is set.
484 */
485# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
486
487/**
488 * Check if an SVM read DRx intercept is set.
489 */
490# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
491
492/**
493 * Check if an SVM write DRx intercept is set.
494 */
495# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
496
497/**
498 * Check if an SVM exception intercept is set.
499 */
500# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
501
502/**
503 * Get the SVM pause-filter count.
504 */
505# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (CPUMGetGuestSvmPauseFilterCount(a_pVCpu, IEM_GET_CTX(a_pVCpu)))
506
507/**
508 * Invokes the SVM \#VMEXIT handler for the nested-guest.
509 */
510# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
511 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
512
513/**
514 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
515 * corresponding decode assist information.
516 */
517# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
518 do \
519 { \
520 uint64_t uExitInfo1; \
521 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
522 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
523 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
524 else \
525 uExitInfo1 = 0; \
526 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
527 } while (0)
528
529#else
530# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
531# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
532# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
533# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
534# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
535# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
536# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
537# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
538# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
539# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (0)
540# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
541# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
542
543#endif
544
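/*
 * Illustrative sketch (hypothetical handler; the SVM_EXIT/SVM_CTRL constants are
 * assumed from hm_svm.h): how the SVM helpers above combine in a nested-guest
 * aware instruction implementation.
 *
 *      IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);                        // #UD / #GP(0) preconditions
 *      if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
 *      {
 *          IEM_SVM_UPDATE_NRIP(pVCpu);                                   // next-RIP decode assist, if supported
 *          IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_VMRUN, 0, 0);           // hand the exit to the guest hypervisor
 *      }
 */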
545
546/*********************************************************************************************************************************
547* Global Variables *
548*********************************************************************************************************************************/
549extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
550
551
552/** Function table for the ADD instruction. */
553IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
554{
555 iemAImpl_add_u8, iemAImpl_add_u8_locked,
556 iemAImpl_add_u16, iemAImpl_add_u16_locked,
557 iemAImpl_add_u32, iemAImpl_add_u32_locked,
558 iemAImpl_add_u64, iemAImpl_add_u64_locked
559};
560
561/** Function table for the ADC instruction. */
562IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
563{
564 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
565 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
566 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
567 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
568};
569
570/** Function table for the SUB instruction. */
571IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
572{
573 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
574 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
575 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
576 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
577};
578
579/** Function table for the SBB instruction. */
580IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
581{
582 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
583 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
584 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
585 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
586};
587
588/** Function table for the OR instruction. */
589IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
590{
591 iemAImpl_or_u8, iemAImpl_or_u8_locked,
592 iemAImpl_or_u16, iemAImpl_or_u16_locked,
593 iemAImpl_or_u32, iemAImpl_or_u32_locked,
594 iemAImpl_or_u64, iemAImpl_or_u64_locked
595};
596
597/** Function table for the XOR instruction. */
598IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
599{
600 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
601 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
602 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
603 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
604};
605
606/** Function table for the AND instruction. */
607IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
608{
609 iemAImpl_and_u8, iemAImpl_and_u8_locked,
610 iemAImpl_and_u16, iemAImpl_and_u16_locked,
611 iemAImpl_and_u32, iemAImpl_and_u32_locked,
612 iemAImpl_and_u64, iemAImpl_and_u64_locked
613};
614
615/** Function table for the CMP instruction.
616 * @remarks Making operand order ASSUMPTIONS.
617 */
618IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
619{
620 iemAImpl_cmp_u8, NULL,
621 iemAImpl_cmp_u16, NULL,
622 iemAImpl_cmp_u32, NULL,
623 iemAImpl_cmp_u64, NULL
624};
625
626/** Function table for the TEST instruction.
627 * @remarks Making operand order ASSUMPTIONS.
628 */
629IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
630{
631 iemAImpl_test_u8, NULL,
632 iemAImpl_test_u16, NULL,
633 iemAImpl_test_u32, NULL,
634 iemAImpl_test_u64, NULL
635};
636
637/** Function table for the BT instruction. */
638IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
639{
640 NULL, NULL,
641 iemAImpl_bt_u16, NULL,
642 iemAImpl_bt_u32, NULL,
643 iemAImpl_bt_u64, NULL
644};
645
646/** Function table for the BTC instruction. */
647IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
648{
649 NULL, NULL,
650 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
651 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
652 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
653};
654
655/** Function table for the BTR instruction. */
656IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
657{
658 NULL, NULL,
659 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
660 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
661 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
662};
663
664/** Function table for the BTS instruction. */
665IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
666{
667 NULL, NULL,
668 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
669 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
670 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
671};
672
673/** Function table for the BSF instruction. */
674IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
675{
676 NULL, NULL,
677 iemAImpl_bsf_u16, NULL,
678 iemAImpl_bsf_u32, NULL,
679 iemAImpl_bsf_u64, NULL
680};
681
682/** Function table for the BSR instruction. */
683IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
684{
685 NULL, NULL,
686 iemAImpl_bsr_u16, NULL,
687 iemAImpl_bsr_u32, NULL,
688 iemAImpl_bsr_u64, NULL
689};
690
691/** Function table for the IMUL instruction. */
692IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
693{
694 NULL, NULL,
695 iemAImpl_imul_two_u16, NULL,
696 iemAImpl_imul_two_u32, NULL,
697 iemAImpl_imul_two_u64, NULL
698};
699
700/** Group 1 /r lookup table. */
701IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
702{
703 &g_iemAImpl_add,
704 &g_iemAImpl_or,
705 &g_iemAImpl_adc,
706 &g_iemAImpl_sbb,
707 &g_iemAImpl_and,
708 &g_iemAImpl_sub,
709 &g_iemAImpl_xor,
710 &g_iemAImpl_cmp
711};
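/*
 * Indexing sketch (assuming the usual iprt/x86.h ModR/M helpers): the table above
 * is indexed by the reg field (bits 5:3) of the ModR/M byte, so a group-1 decoder
 * can select the implementation without a switch statement:
 *
 *      PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 *      // reg: 0=ADD 1=OR 2=ADC 3=SBB 4=AND 5=SUB 6=XOR 7=CMP, matching the initializer order.
 */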
712
713/** Function table for the INC instruction. */
714IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
715{
716 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
717 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
718 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
719 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
720};
721
722/** Function table for the DEC instruction. */
723IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
724{
725 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
726 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
727 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
728 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
729};
730
731/** Function table for the NEG instruction. */
732IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
733{
734 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
735 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
736 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
737 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
738};
739
740/** Function table for the NOT instruction. */
741IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
742{
743 iemAImpl_not_u8, iemAImpl_not_u8_locked,
744 iemAImpl_not_u16, iemAImpl_not_u16_locked,
745 iemAImpl_not_u32, iemAImpl_not_u32_locked,
746 iemAImpl_not_u64, iemAImpl_not_u64_locked
747};
748
749
750/** Function table for the ROL instruction. */
751IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
752{
753 iemAImpl_rol_u8,
754 iemAImpl_rol_u16,
755 iemAImpl_rol_u32,
756 iemAImpl_rol_u64
757};
758
759/** Function table for the ROR instruction. */
760IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
761{
762 iemAImpl_ror_u8,
763 iemAImpl_ror_u16,
764 iemAImpl_ror_u32,
765 iemAImpl_ror_u64
766};
767
768/** Function table for the RCL instruction. */
769IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
770{
771 iemAImpl_rcl_u8,
772 iemAImpl_rcl_u16,
773 iemAImpl_rcl_u32,
774 iemAImpl_rcl_u64
775};
776
777/** Function table for the RCR instruction. */
778IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
779{
780 iemAImpl_rcr_u8,
781 iemAImpl_rcr_u16,
782 iemAImpl_rcr_u32,
783 iemAImpl_rcr_u64
784};
785
786/** Function table for the SHL instruction. */
787IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
788{
789 iemAImpl_shl_u8,
790 iemAImpl_shl_u16,
791 iemAImpl_shl_u32,
792 iemAImpl_shl_u64
793};
794
795/** Function table for the SHR instruction. */
796IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
797{
798 iemAImpl_shr_u8,
799 iemAImpl_shr_u16,
800 iemAImpl_shr_u32,
801 iemAImpl_shr_u64
802};
803
804/** Function table for the SAR instruction. */
805IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
806{
807 iemAImpl_sar_u8,
808 iemAImpl_sar_u16,
809 iemAImpl_sar_u32,
810 iemAImpl_sar_u64
811};
812
813
814/** Function table for the MUL instruction. */
815IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
816{
817 iemAImpl_mul_u8,
818 iemAImpl_mul_u16,
819 iemAImpl_mul_u32,
820 iemAImpl_mul_u64
821};
822
823/** Function table for the IMUL instruction working implicitly on rAX. */
824IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
825{
826 iemAImpl_imul_u8,
827 iemAImpl_imul_u16,
828 iemAImpl_imul_u32,
829 iemAImpl_imul_u64
830};
831
832/** Function table for the DIV instruction. */
833IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
834{
835 iemAImpl_div_u8,
836 iemAImpl_div_u16,
837 iemAImpl_div_u32,
838 iemAImpl_div_u64
839};
840
841/** Function table for the IDIV instruction. */
842IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
843{
844 iemAImpl_idiv_u8,
845 iemAImpl_idiv_u16,
846 iemAImpl_idiv_u32,
847 iemAImpl_idiv_u64
848};
849
850/** Function table for the SHLD instruction */
851IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
852{
853 iemAImpl_shld_u16,
854 iemAImpl_shld_u32,
855 iemAImpl_shld_u64,
856};
857
858/** Function table for the SHRD instruction */
859IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
860{
861 iemAImpl_shrd_u16,
862 iemAImpl_shrd_u32,
863 iemAImpl_shrd_u64,
864};
865
866
867/** Function table for the PUNPCKLBW instruction */
868IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
869/** Function table for the PUNPCKLWD instruction */
870IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
871/** Function table for the PUNPCKLDQ instruction */
872IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
873/** Function table for the PUNPCKLQDQ instruction */
874IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
875
876/** Function table for the PUNPCKHBW instruction */
877IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
878/** Function table for the PUNPCKHWD instruction */
879IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
880/** Function table for the PUNPCKHDQ instruction */
881IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
882/** Function table for the PUNPCKHQDQ instruction */
883IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
884
885/** Function table for the PXOR instruction */
886IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
887/** Function table for the PCMPEQB instruction */
888IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
889/** Function table for the PCMPEQW instruction */
890IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
891/** Function table for the PCMPEQD instruction */
892IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
893
894
895#if defined(IEM_LOG_MEMORY_WRITES)
896/** What IEM just wrote. */
897uint8_t g_abIemWrote[256];
898/** How much IEM just wrote. */
899size_t g_cbIemWrote;
900#endif
901
902
903/*********************************************************************************************************************************
904* Internal Functions *
905*********************************************************************************************************************************/
906IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
907IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
908IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
909IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
910/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
911IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
912IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
913IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
914IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
915IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
916IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
917IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
918IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
919IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
920IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
921IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
922IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
923#ifdef IEM_WITH_SETJMP
924DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
925DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
926DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
927DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
928DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
929#endif
930
931IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
932IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
933IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
934IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
935IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
936IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
937IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
938IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
939IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
940IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
941IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
942IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
943IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
944IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
945IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
946IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
947IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
948
949#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
950IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
951IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
952#endif
953
954
955/**
956 * Sets the pass up status.
957 *
958 * @returns VINF_SUCCESS.
959 * @param pVCpu The cross context virtual CPU structure of the
960 * calling thread.
961 * @param rcPassUp The pass up status. Must be informational.
962 * VINF_SUCCESS is not allowed.
963 */
964IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
965{
966 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
967
968 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
969 if (rcOldPassUp == VINF_SUCCESS)
970 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
971 /* If both are EM scheduling codes, use EM priority rules. */
972 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
973 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
974 {
975 if (rcPassUp < rcOldPassUp)
976 {
977 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
978 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
979 }
980 else
981 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
982 }
983 /* Override EM scheduling with specific status code. */
984 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
985 {
986 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
987 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
988 }
989 /* Don't override specific status code, first come first served. */
990 else
991 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
992 return VINF_SUCCESS;
993}
994
995
996/**
997 * Calculates the CPU mode.
998 *
999 * This is mainly for updating IEMCPU::enmCpuMode.
1000 *
1001 * @returns CPU mode.
1002 * @param pVCpu The cross context virtual CPU structure of the
1003 * calling thread.
1004 */
1005DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1006{
1007 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1008 return IEMMODE_64BIT;
1009 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1010 return IEMMODE_32BIT;
1011 return IEMMODE_16BIT;
1012}
1013
1014
1015/**
1016 * Initializes the execution state.
1017 *
1018 * @param pVCpu The cross context virtual CPU structure of the
1019 * calling thread.
1020 * @param fBypassHandlers Whether to bypass access handlers.
1021 *
1022 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1023 * side-effects in strict builds.
1024 */
1025DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1026{
1027 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1028 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1029
1030#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1031 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1032 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1033 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1034 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1035 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1036 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1037 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1038 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1039#endif
1040
1041#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1042 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1043#endif
1044 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1045 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1046#ifdef VBOX_STRICT
1047 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1048 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1049 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1050 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1051 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1052 pVCpu->iem.s.uRexReg = 127;
1053 pVCpu->iem.s.uRexB = 127;
1054 pVCpu->iem.s.offModRm = 127;
1055 pVCpu->iem.s.uRexIndex = 127;
1056 pVCpu->iem.s.iEffSeg = 127;
1057 pVCpu->iem.s.idxPrefix = 127;
1058 pVCpu->iem.s.uVex3rdReg = 127;
1059 pVCpu->iem.s.uVexLength = 127;
1060 pVCpu->iem.s.fEvexStuff = 127;
1061 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1062# ifdef IEM_WITH_CODE_TLB
1063 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1064 pVCpu->iem.s.pbInstrBuf = NULL;
1065 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1066 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1067 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1068 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1069# else
1070 pVCpu->iem.s.offOpcode = 127;
1071 pVCpu->iem.s.cbOpcode = 127;
1072# endif
1073#endif
1074
1075 pVCpu->iem.s.cActiveMappings = 0;
1076 pVCpu->iem.s.iNextMapping = 0;
1077 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1078 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1079#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1080 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1081 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1082 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1083 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1084 if (!pVCpu->iem.s.fInPatchCode)
1085 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1086#endif
1087}
1088
1089#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1090/**
1091 * Performs a minimal reinitialization of the execution state.
1092 *
1093 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1094 * 'world-switch' type operations on the CPU. Currently only nested
1095 * hardware-virtualization uses it.
1096 *
1097 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1098 */
1099IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1100{
1101 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1102 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1103
1104 pVCpu->iem.s.uCpl = uCpl;
1105 pVCpu->iem.s.enmCpuMode = enmMode;
1106 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1107 pVCpu->iem.s.enmEffAddrMode = enmMode;
1108 if (enmMode != IEMMODE_64BIT)
1109 {
1110 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1111 pVCpu->iem.s.enmEffOpSize = enmMode;
1112 }
1113 else
1114 {
1115 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1116 pVCpu->iem.s.enmEffOpSize = enmMode;
1117 }
1118 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1119#ifndef IEM_WITH_CODE_TLB
1120 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1121 pVCpu->iem.s.offOpcode = 0;
1122 pVCpu->iem.s.cbOpcode = 0;
1123#endif
1124 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1125}
1126#endif
1127
1128/**
1129 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1130 *
1131 * @param pVCpu The cross context virtual CPU structure of the
1132 * calling thread.
1133 */
1134DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1135{
1136 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1137#ifdef VBOX_STRICT
1138# ifdef IEM_WITH_CODE_TLB
1139 NOREF(pVCpu);
1140# else
1141 pVCpu->iem.s.cbOpcode = 0;
1142# endif
1143#else
1144 NOREF(pVCpu);
1145#endif
1146}
1147
1148
1149/**
1150 * Initializes the decoder state.
1151 *
1152 * iemReInitDecoder is mostly a copy of this function.
1153 *
1154 * @param pVCpu The cross context virtual CPU structure of the
1155 * calling thread.
1156 * @param fBypassHandlers Whether to bypass access handlers.
1157 */
1158DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1159{
1160 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1161 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1162
1163#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1164 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1165 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1166 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1167 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1168 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1169 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1170 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1171 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1172#endif
1173
1174#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1175 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1176#endif
1177 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1178 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1179 pVCpu->iem.s.enmCpuMode = enmMode;
1180 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1181 pVCpu->iem.s.enmEffAddrMode = enmMode;
1182 if (enmMode != IEMMODE_64BIT)
1183 {
1184 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1185 pVCpu->iem.s.enmEffOpSize = enmMode;
1186 }
1187 else
1188 {
1189 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1190 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1191 }
1192 pVCpu->iem.s.fPrefixes = 0;
1193 pVCpu->iem.s.uRexReg = 0;
1194 pVCpu->iem.s.uRexB = 0;
1195 pVCpu->iem.s.uRexIndex = 0;
1196 pVCpu->iem.s.idxPrefix = 0;
1197 pVCpu->iem.s.uVex3rdReg = 0;
1198 pVCpu->iem.s.uVexLength = 0;
1199 pVCpu->iem.s.fEvexStuff = 0;
1200 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1201#ifdef IEM_WITH_CODE_TLB
1202 pVCpu->iem.s.pbInstrBuf = NULL;
1203 pVCpu->iem.s.offInstrNextByte = 0;
1204 pVCpu->iem.s.offCurInstrStart = 0;
1205# ifdef VBOX_STRICT
1206 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1207 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1208 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1209# endif
1210#else
1211 pVCpu->iem.s.offOpcode = 0;
1212 pVCpu->iem.s.cbOpcode = 0;
1213#endif
1214 pVCpu->iem.s.offModRm = 0;
1215 pVCpu->iem.s.cActiveMappings = 0;
1216 pVCpu->iem.s.iNextMapping = 0;
1217 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1218 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1219#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1220 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1221 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1222 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1223 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1224 if (!pVCpu->iem.s.fInPatchCode)
1225 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1226#endif
1227
1228#ifdef DBGFTRACE_ENABLED
1229 switch (enmMode)
1230 {
1231 case IEMMODE_64BIT:
1232 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1233 break;
1234 case IEMMODE_32BIT:
1235 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1236 break;
1237 case IEMMODE_16BIT:
1238 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1239 break;
1240 }
1241#endif
1242}
1243
1244
1245/**
1246 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1247 *
1248 * This is mostly a copy of iemInitDecoder.
1249 *
1250 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1251 */
1252DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1253{
1254 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1255
1256#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1257 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1258 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1259 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1260 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1261 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1262 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1263 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1264 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1265#endif
1266
1267 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1268 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1269 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1270 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1271 pVCpu->iem.s.enmEffAddrMode = enmMode;
1272 if (enmMode != IEMMODE_64BIT)
1273 {
1274 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1275 pVCpu->iem.s.enmEffOpSize = enmMode;
1276 }
1277 else
1278 {
1279 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1280 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1281 }
1282 pVCpu->iem.s.fPrefixes = 0;
1283 pVCpu->iem.s.uRexReg = 0;
1284 pVCpu->iem.s.uRexB = 0;
1285 pVCpu->iem.s.uRexIndex = 0;
1286 pVCpu->iem.s.idxPrefix = 0;
1287 pVCpu->iem.s.uVex3rdReg = 0;
1288 pVCpu->iem.s.uVexLength = 0;
1289 pVCpu->iem.s.fEvexStuff = 0;
1290 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1291#ifdef IEM_WITH_CODE_TLB
1292 if (pVCpu->iem.s.pbInstrBuf)
1293 {
1294 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1295 - pVCpu->iem.s.uInstrBufPc;
1296 if (off < pVCpu->iem.s.cbInstrBufTotal)
1297 {
1298 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1299 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1300 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1301 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1302 else
1303 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1304 }
1305 else
1306 {
1307 pVCpu->iem.s.pbInstrBuf = NULL;
1308 pVCpu->iem.s.offInstrNextByte = 0;
1309 pVCpu->iem.s.offCurInstrStart = 0;
1310 pVCpu->iem.s.cbInstrBuf = 0;
1311 pVCpu->iem.s.cbInstrBufTotal = 0;
1312 }
1313 }
1314 else
1315 {
1316 pVCpu->iem.s.offInstrNextByte = 0;
1317 pVCpu->iem.s.offCurInstrStart = 0;
1318 pVCpu->iem.s.cbInstrBuf = 0;
1319 pVCpu->iem.s.cbInstrBufTotal = 0;
1320 }
1321#else
1322 pVCpu->iem.s.cbOpcode = 0;
1323 pVCpu->iem.s.offOpcode = 0;
1324#endif
1325 pVCpu->iem.s.offModRm = 0;
1326 Assert(pVCpu->iem.s.cActiveMappings == 0);
1327 pVCpu->iem.s.iNextMapping = 0;
1328 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1329 Assert(pVCpu->iem.s.fBypassHandlers == false);
1330#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1331 if (!pVCpu->iem.s.fInPatchCode)
1332 { /* likely */ }
1333 else
1334 {
1335 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1336 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1337 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1338 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1339 if (!pVCpu->iem.s.fInPatchCode)
1340 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1341 }
1342#endif
1343
1344#ifdef DBGFTRACE_ENABLED
1345 switch (enmMode)
1346 {
1347 case IEMMODE_64BIT:
1348 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1349 break;
1350 case IEMMODE_32BIT:
1351 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1352 break;
1353 case IEMMODE_16BIT:
1354 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1355 break;
1356 }
1357#endif
1358}
1359
1360
1361
1362/**
1363 * Prefetches opcodes when starting execution for the first time.
1364 *
1365 * @returns Strict VBox status code.
1366 * @param pVCpu The cross context virtual CPU structure of the
1367 * calling thread.
1368 * @param fBypassHandlers Whether to bypass access handlers.
1369 */
1370IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1371{
1372 iemInitDecoder(pVCpu, fBypassHandlers);
1373
1374#ifdef IEM_WITH_CODE_TLB
1375 /** @todo Do ITLB lookup here. */
1376
1377#else /* !IEM_WITH_CODE_TLB */
1378
1379 /*
1380 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1381 *
1382 * First translate CS:rIP to a physical address.
1383 */
1384 uint32_t cbToTryRead;
1385 RTGCPTR GCPtrPC;
1386 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1387 {
1388 cbToTryRead = PAGE_SIZE;
1389 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1390 if (IEM_IS_CANONICAL(GCPtrPC))
1391 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1392 else
1393 return iemRaiseGeneralProtectionFault0(pVCpu);
1394 }
1395 else
1396 {
1397 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1398 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1399 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1400 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1401 else
1402 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1403 if (cbToTryRead) { /* likely */ }
1404 else /* overflowed */
1405 {
1406 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1407 cbToTryRead = UINT32_MAX;
1408 }
1409 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1410 Assert(GCPtrPC <= UINT32_MAX);
1411 }
1412
1413# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1414 /* Allow interpretation of patch manager code blocks since they can for
1415 instance throw #PFs for perfectly good reasons. */
1416 if (pVCpu->iem.s.fInPatchCode)
1417 {
1418 size_t cbRead = 0;
1419 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1420 AssertRCReturn(rc, rc);
1421 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1422 return VINF_SUCCESS;
1423 }
1424# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1425
1426 RTGCPHYS GCPhys;
1427 uint64_t fFlags;
1428 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1429 if (RT_SUCCESS(rc)) { /* probable */ }
1430 else
1431 {
1432 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1433 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1434 }
1435 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1436 else
1437 {
1438 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1439 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1440 }
1441 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1442 else
1443 {
1444 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1445 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1446 }
1447 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1448 /** @todo Check reserved bits and such stuff. PGM is better at doing
1449 * that, so do it when implementing the guest virtual address
1450 * TLB... */
1451
1452 /*
1453 * Read the bytes at this address.
1454 */
1455 PVM pVM = pVCpu->CTX_SUFF(pVM);
1456# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1457 size_t cbActual;
1458 if ( PATMIsEnabled(pVM)
1459 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1460 {
1461 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1462 Assert(cbActual > 0);
1463 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1464 }
1465 else
1466# endif
1467 {
1468 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1469 if (cbToTryRead > cbLeftOnPage)
1470 cbToTryRead = cbLeftOnPage;
1471 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1472 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1473
1474 if (!pVCpu->iem.s.fBypassHandlers)
1475 {
1476 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1477 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1478 { /* likely */ }
1479 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1480 {
1481 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1482                      GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1483 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1484 }
1485 else
1486 {
1487 Log((RT_SUCCESS(rcStrict)
1488 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1489 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1490                      GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1491 return rcStrict;
1492 }
1493 }
1494 else
1495 {
1496 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1497 if (RT_SUCCESS(rc))
1498 { /* likely */ }
1499 else
1500 {
1501 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1502                      GCPtrPC, GCPhys, cbToTryRead, rc));
1503 return rc;
1504 }
1505 }
1506 pVCpu->iem.s.cbOpcode = cbToTryRead;
1507 }
1508#endif /* !IEM_WITH_CODE_TLB */
1509 return VINF_SUCCESS;
1510}
1511
1512
1513/**
1514 * Invalidates the IEM TLBs.
1515 *
1516 * This is called internally as well as by PGM when moving GC mappings.
1517 *
1519 * @param pVCpu The cross context virtual CPU structure of the calling
1520 * thread.
1521 * @param fVmm Set when PGM calls us with a remapping.
1522 */
1523VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1524{
1525#ifdef IEM_WITH_CODE_TLB
1526 pVCpu->iem.s.cbInstrBufTotal = 0;
1527 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1528 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1529 { /* very likely */ }
1530 else
1531 {
1532 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1533 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1534 while (i-- > 0)
1535 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1536 }
1537#endif
1538
1539#ifdef IEM_WITH_DATA_TLB
1540 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1541 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1542 { /* very likely */ }
1543 else
1544 {
1545 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1546 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1547 while (i-- > 0)
1548 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1549 }
1550#endif
1551 NOREF(pVCpu); NOREF(fVmm);
1552}
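/*
 * A minimal sketch of the revision scheme used above, with hypothetical names
 * (DEMOTLB, demoTlbInvalidateAll) and an assumed increment value; it is not the
 * real IEMTLB layout.  Bumping the revision folded into every tag invalidates
 * all entries lazily, and only the rare wrap-around actually clears the array.
 */
#if 0 /* illustrative sketch only */
typedef struct DEMOTLB
{
    uint64_t uRevision;      /* Occupies the high bits of every tag; low bits hold the page number. */
    uint64_t auTags[256];    /* Tag = page number | uRevision; 0 means the slot is empty. */
} DEMOTLB;

static void demoTlbInvalidateAll(DEMOTLB *pTlb)
{
    /* Any tag stored with the old revision no longer compares equal. */
    pTlb->uRevision += UINT64_C(1) << 36;   /* Assumed value, in the spirit of IEMTLB_REVISION_INCR. */
    if (pTlb->uRevision == 0)               /* Wrap-around: ancient tags could match again, ...      */
    {
        pTlb->uRevision = UINT64_C(1) << 36;
        for (unsigned i = 0; i < 256; i++)  /* ... so this rare path really clears the entries.      */
            pTlb->auTags[i] = 0;
    }
}
#endif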
1553
1554
1555/**
1556 * Invalidates a page in the TLBs.
1557 *
1558 * @param pVCpu The cross context virtual CPU structure of the calling
1559 * thread.
1560 * @param GCPtr The address of the page to invalidate
1561 */
1562VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1563{
1564#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1565 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1566 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1567 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1568 uintptr_t idx = (uint8_t)GCPtr;
1569
1570# ifdef IEM_WITH_CODE_TLB
1571 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1572 {
1573 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1574 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1575 pVCpu->iem.s.cbInstrBufTotal = 0;
1576 }
1577# endif
1578
1579# ifdef IEM_WITH_DATA_TLB
1580 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1581 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1582# endif
1583#else
1584 NOREF(pVCpu); NOREF(GCPtr);
1585#endif
1586}
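/*
 * The page invalidation above works because the TLB is direct-mapped: the low
 * 8 bits of the page number select the only slot that can hold the page, and
 * the stored tag is the page number OR'ed with the current revision.  A hedged
 * sketch of that index/tag step, reusing the hypothetical DEMOTLB type from the
 * sketch further up:
 */
#if 0 /* illustrative sketch only */
static void demoTlbInvalidatePage(DEMOTLB *pTlb, uint64_t GCPtr)
{
    uint64_t const uPage = GCPtr >> 12;                  /* X86_PAGE_SHIFT */
    unsigned const idx   = (unsigned)(uPage & 0xff);     /* 256-entry direct-mapped table.   */
    if (pTlb->auTags[idx] == (uPage | pTlb->uRevision))  /* Hit in the only possible slot?   */
        pTlb->auTags[idx] = 0;                           /* Zero never matches a live tag.   */
}
#endif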
1587
1588
1589/**
1590 * Invalidates the host physical aspects of the IEM TLBs.
1591 *
1592 * This is called internally as well as by PGM when moving GC mappings.
1593 *
1594 * @param pVCpu The cross context virtual CPU structure of the calling
1595 * thread.
1596 */
1597VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1598{
1599#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1600     /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1601
1602# ifdef IEM_WITH_CODE_TLB
1603 pVCpu->iem.s.cbInstrBufTotal = 0;
1604# endif
1605 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1606 if (uTlbPhysRev != 0)
1607 {
1608 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1609 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1610 }
1611 else
1612 {
1613 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1614 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1615
1616 unsigned i;
1617# ifdef IEM_WITH_CODE_TLB
1618 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1619 while (i-- > 0)
1620 {
1621 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1622 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1623 }
1624# endif
1625# ifdef IEM_WITH_DATA_TLB
1626 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1627 while (i-- > 0)
1628 {
1629 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1630 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1631 }
1632# endif
1633 }
1634#else
1635 NOREF(pVCpu);
1636#endif
1637}
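/*
 * The physical-side flush above applies the same revision trick to the host
 * mapping half of each entry.  Because the physical revision lives in the high
 * bits of fFlagsAndPhysRev, a single masked compare answers both "is the cached
 * pbMappingR3 still current?" and "are any blocking flags set?".  A hedged
 * sketch with assumed bit positions (not the real IEMTLBE layout):
 */
#if 0 /* illustrative sketch only */
static int demoPhysInfoIsCurrent(uint64_t fFlagsAndPhysRev, uint64_t uTlbPhysRev)
{
    uint64_t const fNoMapping = UINT64_C(0x0001);                 /* assumed flag bit       */
    uint64_t const fNoRead    = UINT64_C(0x0002);                 /* assumed flag bit       */
    uint64_t const fPhysRev   = UINT64_C(0xffffffffffff0000);     /* assumed revision mask  */
    /* Same shape as the check in iemOpcodeFetchBytesJmp: flags and revision in one compare. */
    return (fFlagsAndPhysRev & (fPhysRev | fNoMapping | fNoRead)) == uTlbPhysRev;
}
#endif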
1638
1639
1640/**
1641 * Invalidates the host physical aspects of the IEM TLBs.
1642 *
1643 * This is called internally as well as by PGM when moving GC mappings.
1644 *
1645 * @param pVM The cross context VM structure.
1646 *
1647 * @remarks Caller holds the PGM lock.
1648 */
1649VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1650{
1651 RT_NOREF_PV(pVM);
1652}
1653
1654#ifdef IEM_WITH_CODE_TLB
1655
1656/**
1657 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1658 * longjmping on failure.
1659 *
1660 * We end up here for a number of reasons:
1661 * - pbInstrBuf isn't yet initialized.
1662 *      - Advancing beyond the buffer boundary (e.g. cross page).
1663 * - Advancing beyond the CS segment limit.
1664 * - Fetching from non-mappable page (e.g. MMIO).
1665 *
1666 * @param pVCpu The cross context virtual CPU structure of the
1667 * calling thread.
1668 * @param pvDst Where to return the bytes.
1669 * @param cbDst Number of bytes to read.
1670 *
1671 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1672 */
1673IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1674{
1675#ifdef IN_RING3
1676 for (;;)
1677 {
1678 Assert(cbDst <= 8);
1679 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1680
1681 /*
1682 * We might have a partial buffer match, deal with that first to make the
1683 * rest simpler. This is the first part of the cross page/buffer case.
1684 */
1685 if (pVCpu->iem.s.pbInstrBuf != NULL)
1686 {
1687 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1688 {
1689 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1690 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1691 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1692
1693 cbDst -= cbCopy;
1694 pvDst = (uint8_t *)pvDst + cbCopy;
1695 offBuf += cbCopy;
1696                 pVCpu->iem.s.offInstrNextByte += cbCopy; /* advance by what was actually copied */
1697 }
1698 }
1699
1700 /*
1701 * Check segment limit, figuring how much we're allowed to access at this point.
1702 *
1703 * We will fault immediately if RIP is past the segment limit / in non-canonical
1704 * territory. If we do continue, there are one or more bytes to read before we
1705 * end up in trouble and we need to do that first before faulting.
1706 */
1707 RTGCPTR GCPtrFirst;
1708 uint32_t cbMaxRead;
1709 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1710 {
1711 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1712 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1713 { /* likely */ }
1714 else
1715 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1716 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1717 }
1718 else
1719 {
1720 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1721 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1722 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1723 { /* likely */ }
1724 else
1725 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1726 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1727 if (cbMaxRead != 0)
1728 { /* likely */ }
1729 else
1730 {
1731 /* Overflowed because address is 0 and limit is max. */
1732 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1733 cbMaxRead = X86_PAGE_SIZE;
1734 }
1735 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1736 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1737 if (cbMaxRead2 < cbMaxRead)
1738 cbMaxRead = cbMaxRead2;
1739 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1740 }
1741
1742 /*
1743 * Get the TLB entry for this piece of code.
1744 */
1745 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1746 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1747 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1748 if (pTlbe->uTag == uTag)
1749 {
1750 /* likely when executing lots of code, otherwise unlikely */
1751# ifdef VBOX_WITH_STATISTICS
1752 pVCpu->iem.s.CodeTlb.cTlbHits++;
1753# endif
1754 }
1755 else
1756 {
1757 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1758# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1759 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1760 {
1761 pTlbe->uTag = uTag;
1762 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1763                                       | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1764 pTlbe->GCPhys = NIL_RTGCPHYS;
1765 pTlbe->pbMappingR3 = NULL;
1766 }
1767 else
1768# endif
1769 {
1770 RTGCPHYS GCPhys;
1771 uint64_t fFlags;
1772 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1773 if (RT_FAILURE(rc))
1774 {
1775 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1776 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1777 }
1778
1779 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1780 pTlbe->uTag = uTag;
1781 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1782 pTlbe->GCPhys = GCPhys;
1783 pTlbe->pbMappingR3 = NULL;
1784 }
1785 }
1786
1787 /*
1788 * Check TLB page table level access flags.
1789 */
1790 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1791 {
1792 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1793 {
1794 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1795 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1796 }
1797 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1798 {
1799 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1800 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1801 }
1802 }
1803
1804# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1805 /*
1806 * Allow interpretation of patch manager code blocks since they can for
1807 * instance throw #PFs for perfectly good reasons.
1808 */
1809 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1810         { /* likely */ }
1811 else
1812 {
1813             /** @todo This could be optimized a little in ring-3 if we liked. */
1814 size_t cbRead = 0;
1815 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1816 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1817 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1818 return;
1819 }
1820# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1821
1822 /*
1823 * Look up the physical page info if necessary.
1824 */
1825 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1826 { /* not necessary */ }
1827 else
1828 {
1829 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1830 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1831 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1832 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1833 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1834 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1835 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1836 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1837 }
1838
1839# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1840 /*
1841 * Try do a direct read using the pbMappingR3 pointer.
1842 */
1843 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1844 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1845 {
1846 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1847 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1848 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1849 {
1850 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1851 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1852 }
1853 else
1854 {
1855 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1856 Assert(cbInstr < cbMaxRead);
1857 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1858 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1859 }
1860 if (cbDst <= cbMaxRead)
1861 {
1862 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1863 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1864 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1865 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1866 return;
1867 }
1868 pVCpu->iem.s.pbInstrBuf = NULL;
1869
1870 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1871 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1872 }
1873 else
1874# endif
1875#if 0
1876 /*
1877      * If there is no special read handling, we can read a bit more and
1878 * put it in the prefetch buffer.
1879 */
1880 if ( cbDst < cbMaxRead
1881 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1882 {
1883 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1884 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1885 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1886 { /* likely */ }
1887 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1888 {
1889 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1890 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1891 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1892             AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1893 }
1894 else
1895 {
1896 Log((RT_SUCCESS(rcStrict)
1897 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1898 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1899 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1900 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1901 }
1902 }
1903 /*
1904 * Special read handling, so only read exactly what's needed.
1905 * This is a highly unlikely scenario.
1906 */
1907 else
1908#endif
1909 {
1910 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1911 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1912 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1913 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1914 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1915 { /* likely */ }
1916 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1917 {
1918 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1919                  GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1920 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1921 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1922 }
1923 else
1924 {
1925 Log((RT_SUCCESS(rcStrict)
1926 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1927 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1928                  GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1929 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1930 }
1931 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1932 if (cbToRead == cbDst)
1933 return;
1934 }
1935
1936 /*
1937 * More to read, loop.
1938 */
1939 cbDst -= cbMaxRead;
1940 pvDst = (uint8_t *)pvDst + cbMaxRead;
1941 }
1942#else
1943 RT_NOREF(pvDst, cbDst);
1944 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1945#endif
1946}
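/*
 * Stripped of segmentation, TLB and status handling details, the loop above is
 * "copy what the current page allows, then advance and repeat".  A hedged,
 * self-contained sketch of that shape; demoFetchAcrossPages and the reader
 * callback are hypothetical and not IEM or PGM APIs.
 */
# if 0 /* illustrative sketch only */
static int demoFetchAcrossPages(uint64_t uAddr, uint8_t *pbDst, size_t cbDst,
                                int (*pfnReadPage)(uint64_t uAddr, void *pvDst, size_t cb))
{
    while (cbDst > 0)
    {
        size_t cbChunk = 0x1000 - (size_t)(uAddr & 0xfff);  /* Bytes left on the current 4 KiB page. */
        if (cbChunk > cbDst)
            cbChunk = cbDst;
        int rc = pfnReadPage(uAddr, pbDst, cbChunk);        /* Never crosses a page boundary.        */
        if (rc != 0)
            return rc;                                      /* The real code raises #PF / longjmps.  */
        uAddr += cbChunk;
        pbDst += cbChunk;
        cbDst -= cbChunk;
    }
    return 0;
}
# endif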
1947
1948#else
1949
1950/**
1951 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1952 * exception if it fails.
1953 *
1954 * @returns Strict VBox status code.
1955 * @param pVCpu The cross context virtual CPU structure of the
1956 * calling thread.
1957 * @param   cbMin               The minimum number of bytes, relative to offOpcode,
1958 * that must be read.
1959 */
1960IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1961{
1962 /*
1963 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1964 *
1965 * First translate CS:rIP to a physical address.
1966 */
1967 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1968 uint32_t cbToTryRead;
1969 RTGCPTR GCPtrNext;
1970 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1971 {
1972 cbToTryRead = PAGE_SIZE;
1973 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
1974 if (!IEM_IS_CANONICAL(GCPtrNext))
1975 return iemRaiseGeneralProtectionFault0(pVCpu);
1976 }
1977 else
1978 {
1979 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1980 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1981 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1982 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1983 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1984 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1985 if (!cbToTryRead) /* overflowed */
1986 {
1987 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1988 cbToTryRead = UINT32_MAX;
1989 /** @todo check out wrapping around the code segment. */
1990 }
1991 if (cbToTryRead < cbMin - cbLeft)
1992 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1993 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1994 }
1995
1996 /* Only read up to the end of the page, and make sure we don't read more
1997 than the opcode buffer can hold. */
1998 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1999 if (cbToTryRead > cbLeftOnPage)
2000 cbToTryRead = cbLeftOnPage;
2001 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2002 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2003/** @todo r=bird: Convert assertion into undefined opcode exception? */
2004 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2005
2006# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2007 /* Allow interpretation of patch manager code blocks since they can for
2008 instance throw #PFs for perfectly good reasons. */
2009 if (pVCpu->iem.s.fInPatchCode)
2010 {
2011 size_t cbRead = 0;
2012 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2013 AssertRCReturn(rc, rc);
2014 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2015 return VINF_SUCCESS;
2016 }
2017# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2018
2019 RTGCPHYS GCPhys;
2020 uint64_t fFlags;
2021 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2022 if (RT_FAILURE(rc))
2023 {
2024 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2025 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2026 }
2027 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2028 {
2029 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2030 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2031 }
2032 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2033 {
2034 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2035 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2036 }
2037 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2038 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2039 /** @todo Check reserved bits and such stuff. PGM is better at doing
2040 * that, so do it when implementing the guest virtual address
2041 * TLB... */
2042
2043 /*
2044 * Read the bytes at this address.
2045 *
2046 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2047 * and since PATM should only patch the start of an instruction there
2048 * should be no need to check again here.
2049 */
2050 if (!pVCpu->iem.s.fBypassHandlers)
2051 {
2052 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2053 cbToTryRead, PGMACCESSORIGIN_IEM);
2054 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2055 { /* likely */ }
2056 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2057 {
2058 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2059              GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2060 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2061 }
2062 else
2063 {
2064 Log((RT_SUCCESS(rcStrict)
2065 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2066 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2067              GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2068 return rcStrict;
2069 }
2070 }
2071 else
2072 {
2073 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2074 if (RT_SUCCESS(rc))
2075 { /* likely */ }
2076 else
2077 {
2078 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2079 return rc;
2080 }
2081 }
2082 pVCpu->iem.s.cbOpcode += cbToTryRead;
2083 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2084
2085 return VINF_SUCCESS;
2086}
2087
2088#endif /* !IEM_WITH_CODE_TLB */
2089#ifndef IEM_WITH_SETJMP
2090
2091/**
2092 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2093 *
2094 * @returns Strict VBox status code.
2095 * @param pVCpu The cross context virtual CPU structure of the
2096 * calling thread.
2097 * @param pb Where to return the opcode byte.
2098 */
2099DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2100{
2101 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2102 if (rcStrict == VINF_SUCCESS)
2103 {
2104 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2105 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2106 pVCpu->iem.s.offOpcode = offOpcode + 1;
2107 }
2108 else
2109 *pb = 0;
2110 return rcStrict;
2111}
2112
2113
2114/**
2115 * Fetches the next opcode byte.
2116 *
2117 * @returns Strict VBox status code.
2118 * @param pVCpu The cross context virtual CPU structure of the
2119 * calling thread.
2120 * @param pu8 Where to return the opcode byte.
2121 */
2122DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2123{
2124 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2125 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2126 {
2127 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2128 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2129 return VINF_SUCCESS;
2130 }
2131 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2132}
2133
2134#else /* IEM_WITH_SETJMP */
2135
2136/**
2137 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2138 *
2139 * @returns The opcode byte.
2140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2141 */
2142DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2143{
2144# ifdef IEM_WITH_CODE_TLB
2145 uint8_t u8;
2146 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2147 return u8;
2148# else
2149 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2150 if (rcStrict == VINF_SUCCESS)
2151 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2152 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2153# endif
2154}
2155
2156
2157/**
2158 * Fetches the next opcode byte, longjmp on error.
2159 *
2160 * @returns The opcode byte.
2161 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2162 */
2163DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2164{
2165# ifdef IEM_WITH_CODE_TLB
2166 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2167 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2168 if (RT_LIKELY( pbBuf != NULL
2169 && offBuf < pVCpu->iem.s.cbInstrBuf))
2170 {
2171 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2172 return pbBuf[offBuf];
2173 }
2174# else
2175 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2176 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2177 {
2178 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2179 return pVCpu->iem.s.abOpcode[offOpcode];
2180 }
2181# endif
2182 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2183}
2184
2185#endif /* IEM_WITH_SETJMP */
2186
2187/**
2188 * Fetches the next opcode byte, returns automatically on failure.
2189 *
2190 * @param a_pu8 Where to return the opcode byte.
2191 * @remark Implicitly references pVCpu.
2192 */
2193#ifndef IEM_WITH_SETJMP
2194# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2195 do \
2196 { \
2197 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2198 if (rcStrict2 == VINF_SUCCESS) \
2199 { /* likely */ } \
2200 else \
2201 return rcStrict2; \
2202 } while (0)
2203#else
2204# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2205#endif /* IEM_WITH_SETJMP */
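/*
 * A hedged sketch of how a decoder body typically uses the macro above;
 * iemOpDemo_xyz is hypothetical and not part of this file.  The same source
 * works in both build modes: without IEM_WITH_SETJMP the macro returns the
 * strict status code from the enclosing function, with it the fetch longjmps
 * on failure and simply yields the byte.
 */
#if 0 /* illustrative sketch only */
IEM_STATIC VBOXSTRICTRC iemOpDemo_xyz(PVMCPU pVCpu)
{
    uint8_t bImm;
    IEM_OPCODE_GET_NEXT_U8(&bImm);   /* May 'return rcStrict2' or longjmp, depending on the build. */
    /* ... emulate the instruction using bImm ... */
    RT_NOREF(bImm);
    return VINF_SUCCESS;
}
#endif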
2206
2207
2208#ifndef IEM_WITH_SETJMP
2209/**
2210 * Fetches the next signed byte from the opcode stream.
2211 *
2212 * @returns Strict VBox status code.
2213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2214 * @param pi8 Where to return the signed byte.
2215 */
2216DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2217{
2218 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2219}
2220#endif /* !IEM_WITH_SETJMP */
2221
2222
2223/**
2224 * Fetches the next signed byte from the opcode stream, returning automatically
2225 * on failure.
2226 *
2227 * @param a_pi8 Where to return the signed byte.
2228 * @remark Implicitly references pVCpu.
2229 */
2230#ifndef IEM_WITH_SETJMP
2231# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2232 do \
2233 { \
2234 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2235 if (rcStrict2 != VINF_SUCCESS) \
2236 return rcStrict2; \
2237 } while (0)
2238#else /* IEM_WITH_SETJMP */
2239# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2240
2241#endif /* IEM_WITH_SETJMP */
2242
2243#ifndef IEM_WITH_SETJMP
2244
2245/**
2246 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2247 *
2248 * @returns Strict VBox status code.
2249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2250 * @param   pu16                Where to return the opcode word.
2251 */
2252DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2253{
2254 uint8_t u8;
2255 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2256 if (rcStrict == VINF_SUCCESS)
2257 *pu16 = (int8_t)u8;
2258 return rcStrict;
2259}
2260
2261
2262/**
2263 * Fetches the next signed byte from the opcode stream, extending it to
2264 * unsigned 16-bit.
2265 *
2266 * @returns Strict VBox status code.
2267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2268 * @param pu16 Where to return the unsigned word.
2269 */
2270DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2271{
2272 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2273 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2274 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2275
2276 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2277 pVCpu->iem.s.offOpcode = offOpcode + 1;
2278 return VINF_SUCCESS;
2279}
2280
2281#endif /* !IEM_WITH_SETJMP */
2282
2283/**
2284 * Fetches the next signed byte from the opcode stream, sign-extending it to
2285 * a word, and returns automatically on failure.
2286 *
2287 * @param a_pu16 Where to return the word.
2288 * @remark Implicitly references pVCpu.
2289 */
2290#ifndef IEM_WITH_SETJMP
2291# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2292 do \
2293 { \
2294 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2295 if (rcStrict2 != VINF_SUCCESS) \
2296 return rcStrict2; \
2297 } while (0)
2298#else
2299# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2300#endif
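/*
 * The S8 to U16/U32/U64 helpers above sign-extend simply by casting the byte to
 * int8_t and letting the implicit conversion to the wider unsigned type do the
 * rest.  A tiny hedged sketch of that idiom (demoSignExtendU8ToU16 is
 * hypothetical):
 */
#if 0 /* illustrative sketch only */
static uint16_t demoSignExtendU8ToU16(uint8_t b)
{
    return (uint16_t)(int8_t)b;   /* 0x80 becomes 0xff80, 0x7f stays 0x007f. */
}
#endif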
2301
2302#ifndef IEM_WITH_SETJMP
2303
2304/**
2305 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2306 *
2307 * @returns Strict VBox status code.
2308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2309 * @param pu32 Where to return the opcode dword.
2310 */
2311DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2312{
2313 uint8_t u8;
2314 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2315 if (rcStrict == VINF_SUCCESS)
2316 *pu32 = (int8_t)u8;
2317 return rcStrict;
2318}
2319
2320
2321/**
2322 * Fetches the next signed byte from the opcode stream, extending it to
2323 * unsigned 32-bit.
2324 *
2325 * @returns Strict VBox status code.
2326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2327 * @param pu32 Where to return the unsigned dword.
2328 */
2329DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2330{
2331 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2332 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2333 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2334
2335 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2336 pVCpu->iem.s.offOpcode = offOpcode + 1;
2337 return VINF_SUCCESS;
2338}
2339
2340#endif /* !IEM_WITH_SETJMP */
2341
2342/**
2343 * Fetches the next signed byte from the opcode stream, sign-extending it to
2344 * a double word, and returns automatically on failure.
2345 *
2346 * @param   a_pu32              Where to return the double word.
2347 * @remark Implicitly references pVCpu.
2348 */
2349#ifndef IEM_WITH_SETJMP
2350#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2351 do \
2352 { \
2353 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2354 if (rcStrict2 != VINF_SUCCESS) \
2355 return rcStrict2; \
2356 } while (0)
2357#else
2358# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2359#endif
2360
2361#ifndef IEM_WITH_SETJMP
2362
2363/**
2364 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2365 *
2366 * @returns Strict VBox status code.
2367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2368 * @param pu64 Where to return the opcode qword.
2369 */
2370DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2371{
2372 uint8_t u8;
2373 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2374 if (rcStrict == VINF_SUCCESS)
2375 *pu64 = (int8_t)u8;
2376 return rcStrict;
2377}
2378
2379
2380/**
2381 * Fetches the next signed byte from the opcode stream, extending it to
2382 * unsigned 64-bit.
2383 *
2384 * @returns Strict VBox status code.
2385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2386 * @param pu64 Where to return the unsigned qword.
2387 */
2388DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2389{
2390 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2391 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2392 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2393
2394 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2395 pVCpu->iem.s.offOpcode = offOpcode + 1;
2396 return VINF_SUCCESS;
2397}
2398
2399#endif /* !IEM_WITH_SETJMP */
2400
2401
2402/**
2403 * Fetches the next signed byte from the opcode stream, sign-extending it to
2404 * a quad word, and returns automatically on failure.
2405 *
2406 * @param   a_pu64              Where to return the quad word.
2407 * @remark Implicitly references pVCpu.
2408 */
2409#ifndef IEM_WITH_SETJMP
2410# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2411 do \
2412 { \
2413 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2414 if (rcStrict2 != VINF_SUCCESS) \
2415 return rcStrict2; \
2416 } while (0)
2417#else
2418# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2419#endif
2420
2421
2422#ifndef IEM_WITH_SETJMP
2423/**
2424 * Fetches the next opcode byte, which is a ModR/M byte, noting down its offset.
2425 *
2426 * @returns Strict VBox status code.
2427 * @param pVCpu The cross context virtual CPU structure of the
2428 * calling thread.
2429 * @param pu8 Where to return the opcode byte.
2430 */
2431DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2432{
2433 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2434 pVCpu->iem.s.offModRm = offOpcode;
2435 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2436 {
2437 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2438 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2439 return VINF_SUCCESS;
2440 }
2441 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2442}
2443#else /* IEM_WITH_SETJMP */
2444/**
2445 * Fetches the next opcode byte, which is a ModR/M byte, noting down its offset, longjmp on error.
2446 *
2447 * @returns The opcode byte.
2448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2449 */
2450DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2451{
2452# ifdef IEM_WITH_CODE_TLB
2453 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2454 pVCpu->iem.s.offModRm = offBuf;
2455 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2456 if (RT_LIKELY( pbBuf != NULL
2457 && offBuf < pVCpu->iem.s.cbInstrBuf))
2458 {
2459 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2460 return pbBuf[offBuf];
2461 }
2462# else
2463 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2464 pVCpu->iem.s.offModRm = offOpcode;
2465 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2466 {
2467 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2468 return pVCpu->iem.s.abOpcode[offOpcode];
2469 }
2470# endif
2471 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2472}
2473#endif /* IEM_WITH_SETJMP */
2474
2475/**
2476 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2477 * on failure.
2478 *
2479 * Will note down the position of the ModR/M byte for VT-x exits.
2480 *
2481 * @param a_pbRm Where to return the RM opcode byte.
2482 * @remark Implicitly references pVCpu.
2483 */
2484#ifndef IEM_WITH_SETJMP
2485# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2486 do \
2487 { \
2488 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2489 if (rcStrict2 == VINF_SUCCESS) \
2490 { /* likely */ } \
2491 else \
2492 return rcStrict2; \
2493 } while (0)
2494#else
2495# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2496#endif /* IEM_WITH_SETJMP */
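/*
 * A hedged sketch of typical IEM_OPCODE_GET_NEXT_RM use: it behaves like
 * IEM_OPCODE_GET_NEXT_U8 but additionally records the byte's offset (offModRm)
 * so nested VT-x handling can locate the ModR/M byte later.  iemOpDemo_modrm is
 * hypothetical, and the field extraction uses plain shifts rather than the IEM
 * helper macros.
 */
#if 0 /* illustrative sketch only */
IEM_STATIC VBOXSTRICTRC iemOpDemo_modrm(PVMCPU pVCpu)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_RM(&bRm);           /* Also notes the position in pVCpu->iem.s.offModRm. */
    uint8_t const iMod = bRm >> 6;          /* mod field: 0..2 memory forms, 3 register form.    */
    uint8_t const iReg = (bRm >> 3) & 7;    /* reg field / opcode extension.                     */
    uint8_t const iRm  = bRm & 7;           /* r/m field.                                        */
    /* ... dispatch on iMod / iReg / iRm ... */
    RT_NOREF(iMod, iReg, iRm);
    return VINF_SUCCESS;
}
#endif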
2497
2498
2499#ifndef IEM_WITH_SETJMP
2500
2501/**
2502 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2503 *
2504 * @returns Strict VBox status code.
2505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2506 * @param pu16 Where to return the opcode word.
2507 */
2508DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2509{
2510 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2511 if (rcStrict == VINF_SUCCESS)
2512 {
2513 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2514# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2515 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2516# else
2517 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2518# endif
2519 pVCpu->iem.s.offOpcode = offOpcode + 2;
2520 }
2521 else
2522 *pu16 = 0;
2523 return rcStrict;
2524}
2525
2526
2527/**
2528 * Fetches the next opcode word.
2529 *
2530 * @returns Strict VBox status code.
2531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2532 * @param pu16 Where to return the opcode word.
2533 */
2534DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2535{
2536 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2537 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2538 {
2539 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2540# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2541 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2542# else
2543 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2544# endif
2545 return VINF_SUCCESS;
2546 }
2547 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2548}
2549
2550#else /* IEM_WITH_SETJMP */
2551
2552/**
2553 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2554 *
2555 * @returns The opcode word.
2556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2557 */
2558DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2559{
2560# ifdef IEM_WITH_CODE_TLB
2561 uint16_t u16;
2562 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2563 return u16;
2564# else
2565 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2566 if (rcStrict == VINF_SUCCESS)
2567 {
2568 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2569 pVCpu->iem.s.offOpcode += 2;
2570# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2571 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2572# else
2573 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2574# endif
2575 }
2576 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2577# endif
2578}
2579
2580
2581/**
2582 * Fetches the next opcode word, longjmp on error.
2583 *
2584 * @returns The opcode word.
2585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2586 */
2587DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2588{
2589# ifdef IEM_WITH_CODE_TLB
2590 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2591 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2592 if (RT_LIKELY( pbBuf != NULL
2593 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2594 {
2595 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2596# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2597 return *(uint16_t const *)&pbBuf[offBuf];
2598# else
2599 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2600# endif
2601 }
2602# else
2603 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2604 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2605 {
2606 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2607# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2608 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2609# else
2610 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2611# endif
2612 }
2613# endif
2614 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2615}
2616
2617#endif /* IEM_WITH_SETJMP */
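/*
 * The 16/32/64-bit fetchers above read an immediate either with a single
 * unaligned load (IEM_USE_UNALIGNED_DATA_ACCESS) or by assembling the bytes in
 * little-endian order via RT_MAKE_U16 / RT_MAKE_U32_FROM_U8 / RT_MAKE_U64_FROM_U8;
 * on x86 both give the same value.  A hedged sketch of the equivalence for the
 * 16-bit case (demoReadU16 is hypothetical):
 */
#if 0 /* illustrative sketch only */
static uint16_t demoReadU16(uint8_t const *pb)
{
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    return *(uint16_t const *)pb;                        /* x86 permits unaligned loads.     */
# else
    return (uint16_t)(pb[0] | ((uint16_t)pb[1] << 8));   /* Explicit little-endian assembly. */
# endif
}
#endif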
2618
2619
2620/**
2621 * Fetches the next opcode word, returns automatically on failure.
2622 *
2623 * @param a_pu16 Where to return the opcode word.
2624 * @remark Implicitly references pVCpu.
2625 */
2626#ifndef IEM_WITH_SETJMP
2627# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2628 do \
2629 { \
2630 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2631 if (rcStrict2 != VINF_SUCCESS) \
2632 return rcStrict2; \
2633 } while (0)
2634#else
2635# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2636#endif
2637
2638#ifndef IEM_WITH_SETJMP
2639
2640/**
2641 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2642 *
2643 * @returns Strict VBox status code.
2644 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2645 * @param pu32 Where to return the opcode double word.
2646 */
2647DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2648{
2649 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2650 if (rcStrict == VINF_SUCCESS)
2651 {
2652 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2653 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2654 pVCpu->iem.s.offOpcode = offOpcode + 2;
2655 }
2656 else
2657 *pu32 = 0;
2658 return rcStrict;
2659}
2660
2661
2662/**
2663 * Fetches the next opcode word, zero extending it to a double word.
2664 *
2665 * @returns Strict VBox status code.
2666 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2667 * @param pu32 Where to return the opcode double word.
2668 */
2669DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2670{
2671 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2672 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2673 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2674
2675 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2676 pVCpu->iem.s.offOpcode = offOpcode + 2;
2677 return VINF_SUCCESS;
2678}
2679
2680#endif /* !IEM_WITH_SETJMP */
2681
2682
2683/**
2684 * Fetches the next opcode word and zero extends it to a double word, returns
2685 * automatically on failure.
2686 *
2687 * @param a_pu32 Where to return the opcode double word.
2688 * @remark Implicitly references pVCpu.
2689 */
2690#ifndef IEM_WITH_SETJMP
2691# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2692 do \
2693 { \
2694 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2695 if (rcStrict2 != VINF_SUCCESS) \
2696 return rcStrict2; \
2697 } while (0)
2698#else
2699# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2700#endif
2701
2702#ifndef IEM_WITH_SETJMP
2703
2704/**
2705 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2706 *
2707 * @returns Strict VBox status code.
2708 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2709 * @param pu64 Where to return the opcode quad word.
2710 */
2711DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2712{
2713 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2714 if (rcStrict == VINF_SUCCESS)
2715 {
2716 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2717 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2718 pVCpu->iem.s.offOpcode = offOpcode + 2;
2719 }
2720 else
2721 *pu64 = 0;
2722 return rcStrict;
2723}
2724
2725
2726/**
2727 * Fetches the next opcode word, zero extending it to a quad word.
2728 *
2729 * @returns Strict VBox status code.
2730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2731 * @param pu64 Where to return the opcode quad word.
2732 */
2733DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2734{
2735 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2736 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2737 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2738
2739 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2740 pVCpu->iem.s.offOpcode = offOpcode + 2;
2741 return VINF_SUCCESS;
2742}
2743
2744#endif /* !IEM_WITH_SETJMP */
2745
2746/**
2747 * Fetches the next opcode word and zero extends it to a quad word, returns
2748 * automatically on failure.
2749 *
2750 * @param a_pu64 Where to return the opcode quad word.
2751 * @remark Implicitly references pVCpu.
2752 */
2753#ifndef IEM_WITH_SETJMP
2754# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2755 do \
2756 { \
2757 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2758 if (rcStrict2 != VINF_SUCCESS) \
2759 return rcStrict2; \
2760 } while (0)
2761#else
2762# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2763#endif
2764
2765
2766#ifndef IEM_WITH_SETJMP
2767/**
2768 * Fetches the next signed word from the opcode stream.
2769 *
2770 * @returns Strict VBox status code.
2771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2772 * @param pi16 Where to return the signed word.
2773 */
2774DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2775{
2776 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2777}
2778#endif /* !IEM_WITH_SETJMP */
2779
2780
2781/**
2782 * Fetches the next signed word from the opcode stream, returning automatically
2783 * on failure.
2784 *
2785 * @param a_pi16 Where to return the signed word.
2786 * @remark Implicitly references pVCpu.
2787 */
2788#ifndef IEM_WITH_SETJMP
2789# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2790 do \
2791 { \
2792 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2793 if (rcStrict2 != VINF_SUCCESS) \
2794 return rcStrict2; \
2795 } while (0)
2796#else
2797# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2798#endif
2799
2800#ifndef IEM_WITH_SETJMP
2801
2802/**
2803 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2804 *
2805 * @returns Strict VBox status code.
2806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2807 * @param pu32 Where to return the opcode dword.
2808 */
2809DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2810{
2811 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2812 if (rcStrict == VINF_SUCCESS)
2813 {
2814 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2815# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2816 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2817# else
2818 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2819 pVCpu->iem.s.abOpcode[offOpcode + 1],
2820 pVCpu->iem.s.abOpcode[offOpcode + 2],
2821 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2822# endif
2823 pVCpu->iem.s.offOpcode = offOpcode + 4;
2824 }
2825 else
2826 *pu32 = 0;
2827 return rcStrict;
2828}
2829
2830
2831/**
2832 * Fetches the next opcode dword.
2833 *
2834 * @returns Strict VBox status code.
2835 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2836 * @param pu32 Where to return the opcode double word.
2837 */
2838DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2839{
2840 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2841 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2842 {
2843 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2844# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2845 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2846# else
2847 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2848 pVCpu->iem.s.abOpcode[offOpcode + 1],
2849 pVCpu->iem.s.abOpcode[offOpcode + 2],
2850 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2851# endif
2852 return VINF_SUCCESS;
2853 }
2854 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2855}
2856
2857 #else  /* IEM_WITH_SETJMP */
2858
2859/**
2860 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2861 *
2862 * @returns The opcode dword.
2863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2864 */
2865DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2866{
2867# ifdef IEM_WITH_CODE_TLB
2868 uint32_t u32;
2869 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2870 return u32;
2871# else
2872 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2873 if (rcStrict == VINF_SUCCESS)
2874 {
2875 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2876 pVCpu->iem.s.offOpcode = offOpcode + 4;
2877# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2878 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2879# else
2880 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2881 pVCpu->iem.s.abOpcode[offOpcode + 1],
2882 pVCpu->iem.s.abOpcode[offOpcode + 2],
2883 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2884# endif
2885 }
2886 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2887# endif
2888}
2889
2890
2891/**
2892 * Fetches the next opcode dword, longjmp on error.
2893 *
2894 * @returns The opcode dword.
2895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2896 */
2897DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2898{
2899# ifdef IEM_WITH_CODE_TLB
2900 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2901 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2902 if (RT_LIKELY( pbBuf != NULL
2903 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2904 {
2905 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2906# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2907 return *(uint32_t const *)&pbBuf[offBuf];
2908# else
2909 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2910 pbBuf[offBuf + 1],
2911 pbBuf[offBuf + 2],
2912 pbBuf[offBuf + 3]);
2913# endif
2914 }
2915# else
2916 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2917 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2918 {
2919 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2920# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2921 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2922# else
2923 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2924 pVCpu->iem.s.abOpcode[offOpcode + 1],
2925 pVCpu->iem.s.abOpcode[offOpcode + 2],
2926 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2927# endif
2928 }
2929# endif
2930 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2931}
2932
2933 #endif /* IEM_WITH_SETJMP */
2934
2935
2936/**
2937 * Fetches the next opcode dword, returns automatically on failure.
2938 *
2939 * @param a_pu32 Where to return the opcode dword.
2940 * @remark Implicitly references pVCpu.
2941 */
2942#ifndef IEM_WITH_SETJMP
2943# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2944 do \
2945 { \
2946 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2947 if (rcStrict2 != VINF_SUCCESS) \
2948 return rcStrict2; \
2949 } while (0)
2950#else
2951# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2952#endif
2953
2954#ifndef IEM_WITH_SETJMP
2955
2956/**
2957 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2958 *
2959 * @returns Strict VBox status code.
2960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2961 * @param   pu64                Where to return the opcode quad word.
2962 */
2963DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2964{
2965 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2966 if (rcStrict == VINF_SUCCESS)
2967 {
2968 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2969 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2970 pVCpu->iem.s.abOpcode[offOpcode + 1],
2971 pVCpu->iem.s.abOpcode[offOpcode + 2],
2972 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2973 pVCpu->iem.s.offOpcode = offOpcode + 4;
2974 }
2975 else
2976 *pu64 = 0;
2977 return rcStrict;
2978}
2979
2980
2981/**
2982 * Fetches the next opcode dword, zero extending it to a quad word.
2983 *
2984 * @returns Strict VBox status code.
2985 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2986 * @param pu64 Where to return the opcode quad word.
2987 */
2988DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2989{
2990 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2991 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2992 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2993
2994 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2995 pVCpu->iem.s.abOpcode[offOpcode + 1],
2996 pVCpu->iem.s.abOpcode[offOpcode + 2],
2997 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2998 pVCpu->iem.s.offOpcode = offOpcode + 4;
2999 return VINF_SUCCESS;
3000}
3001
3002#endif /* !IEM_WITH_SETJMP */
3003
3004
3005/**
3006 * Fetches the next opcode dword and zero extends it to a quad word, returns
3007 * automatically on failure.
3008 *
3009 * @param a_pu64 Where to return the opcode quad word.
3010 * @remark Implicitly references pVCpu.
3011 */
3012#ifndef IEM_WITH_SETJMP
3013# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3014 do \
3015 { \
3016 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3017 if (rcStrict2 != VINF_SUCCESS) \
3018 return rcStrict2; \
3019 } while (0)
3020#else
3021# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3022#endif
3023
3024
3025#ifndef IEM_WITH_SETJMP
3026/**
3027 * Fetches the next signed double word from the opcode stream.
3028 *
3029 * @returns Strict VBox status code.
3030 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3031 * @param pi32 Where to return the signed double word.
3032 */
3033DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3034{
3035 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3036}
3037#endif
3038
3039/**
3040 * Fetches the next signed double word from the opcode stream, returning
3041 * automatically on failure.
3042 *
3043 * @param a_pi32 Where to return the signed double word.
3044 * @remark Implicitly references pVCpu.
3045 */
3046#ifndef IEM_WITH_SETJMP
3047# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3048 do \
3049 { \
3050 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3051 if (rcStrict2 != VINF_SUCCESS) \
3052 return rcStrict2; \
3053 } while (0)
3054#else
3055# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3056#endif
3057
3058#ifndef IEM_WITH_SETJMP
3059
3060/**
3061 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3062 *
3063 * @returns Strict VBox status code.
3064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3065 * @param pu64 Where to return the opcode qword.
3066 */
3067DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3068{
3069 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3070 if (rcStrict == VINF_SUCCESS)
3071 {
3072 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3073 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3074 pVCpu->iem.s.abOpcode[offOpcode + 1],
3075 pVCpu->iem.s.abOpcode[offOpcode + 2],
3076 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3077 pVCpu->iem.s.offOpcode = offOpcode + 4;
3078 }
3079 else
3080 *pu64 = 0;
3081 return rcStrict;
3082}
3083
3084
3085/**
3086 * Fetches the next opcode dword, sign extending it into a quad word.
3087 *
3088 * @returns Strict VBox status code.
3089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3090 * @param pu64 Where to return the opcode quad word.
3091 */
3092DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3093{
3094 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3095 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3096 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3097
3098 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3099 pVCpu->iem.s.abOpcode[offOpcode + 1],
3100 pVCpu->iem.s.abOpcode[offOpcode + 2],
3101 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3102 *pu64 = i32;
3103 pVCpu->iem.s.offOpcode = offOpcode + 4;
3104 return VINF_SUCCESS;
3105}
3106
3107#endif /* !IEM_WITH_SETJMP */
3108
3109
3110/**
3111 * Fetches the next opcode double word and sign extends it to a quad word,
3112 * returns automatically on failure.
3113 *
3114 * @param a_pu64 Where to return the opcode quad word.
3115 * @remark Implicitly references pVCpu.
3116 */
3117#ifndef IEM_WITH_SETJMP
3118# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3119 do \
3120 { \
3121 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3122 if (rcStrict2 != VINF_SUCCESS) \
3123 return rcStrict2; \
3124 } while (0)
3125#else
3126# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3127#endif
3128
3129#ifndef IEM_WITH_SETJMP
3130
3131/**
3132 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3133 *
3134 * @returns Strict VBox status code.
3135 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3136 * @param pu64 Where to return the opcode qword.
3137 */
3138DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3139{
3140 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3141 if (rcStrict == VINF_SUCCESS)
3142 {
3143 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
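        /* Either read the qword directly (when unaligned host accesses are enabled) or assemble it byte by byte. */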
3144# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3145 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3146# else
3147 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3148 pVCpu->iem.s.abOpcode[offOpcode + 1],
3149 pVCpu->iem.s.abOpcode[offOpcode + 2],
3150 pVCpu->iem.s.abOpcode[offOpcode + 3],
3151 pVCpu->iem.s.abOpcode[offOpcode + 4],
3152 pVCpu->iem.s.abOpcode[offOpcode + 5],
3153 pVCpu->iem.s.abOpcode[offOpcode + 6],
3154 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3155# endif
3156 pVCpu->iem.s.offOpcode = offOpcode + 8;
3157 }
3158 else
3159 *pu64 = 0;
3160 return rcStrict;
3161}
3162
3163
3164/**
3165 * Fetches the next opcode qword.
3166 *
3167 * @returns Strict VBox status code.
3168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3169 * @param pu64 Where to return the opcode qword.
3170 */
3171DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3172{
3173 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3174 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3175 {
3176# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3177 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3178# else
3179 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3180 pVCpu->iem.s.abOpcode[offOpcode + 1],
3181 pVCpu->iem.s.abOpcode[offOpcode + 2],
3182 pVCpu->iem.s.abOpcode[offOpcode + 3],
3183 pVCpu->iem.s.abOpcode[offOpcode + 4],
3184 pVCpu->iem.s.abOpcode[offOpcode + 5],
3185 pVCpu->iem.s.abOpcode[offOpcode + 6],
3186 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3187# endif
3188 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3189 return VINF_SUCCESS;
3190 }
3191 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3192}
3193
3194#else /* IEM_WITH_SETJMP */
3195
3196/**
3197 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3198 *
3199 * @returns The opcode qword.
3200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3201 */
3202DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3203{
3204# ifdef IEM_WITH_CODE_TLB
3205 uint64_t u64;
3206 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3207 return u64;
3208# else
3209 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3210 if (rcStrict == VINF_SUCCESS)
3211 {
3212 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3213 pVCpu->iem.s.offOpcode = offOpcode + 8;
3214# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3215 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3216# else
3217 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3218 pVCpu->iem.s.abOpcode[offOpcode + 1],
3219 pVCpu->iem.s.abOpcode[offOpcode + 2],
3220 pVCpu->iem.s.abOpcode[offOpcode + 3],
3221 pVCpu->iem.s.abOpcode[offOpcode + 4],
3222 pVCpu->iem.s.abOpcode[offOpcode + 5],
3223 pVCpu->iem.s.abOpcode[offOpcode + 6],
3224 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3225# endif
3226 }
3227 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3228# endif
3229}
3230
3231
3232/**
3233 * Fetches the next opcode qword, longjmp on error.
3234 *
3235 * @returns The opcode qword.
3236 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3237 */
3238DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3239{
3240# ifdef IEM_WITH_CODE_TLB
3241 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3242 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3243 if (RT_LIKELY( pbBuf != NULL
3244 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3245 {
3246 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3247# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3248 return *(uint64_t const *)&pbBuf[offBuf];
3249# else
3250 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3251 pbBuf[offBuf + 1],
3252 pbBuf[offBuf + 2],
3253 pbBuf[offBuf + 3],
3254 pbBuf[offBuf + 4],
3255 pbBuf[offBuf + 5],
3256 pbBuf[offBuf + 6],
3257 pbBuf[offBuf + 7]);
3258# endif
3259 }
3260# else
3261 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3262 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3263 {
3264 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3265# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3266 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3267# else
3268 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3269 pVCpu->iem.s.abOpcode[offOpcode + 1],
3270 pVCpu->iem.s.abOpcode[offOpcode + 2],
3271 pVCpu->iem.s.abOpcode[offOpcode + 3],
3272 pVCpu->iem.s.abOpcode[offOpcode + 4],
3273 pVCpu->iem.s.abOpcode[offOpcode + 5],
3274 pVCpu->iem.s.abOpcode[offOpcode + 6],
3275 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3276# endif
3277 }
3278# endif
3279 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3280}
3281
3282#endif /* IEM_WITH_SETJMP */
3283
3284/**
3285 * Fetches the next opcode quad word, returns automatically on failure.
3286 *
3287 * @param a_pu64 Where to return the opcode quad word.
3288 * @remark Implicitly references pVCpu.
3289 */
3290#ifndef IEM_WITH_SETJMP
3291# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3292 do \
3293 { \
3294 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3295 if (rcStrict2 != VINF_SUCCESS) \
3296 return rcStrict2; \
3297 } while (0)
3298#else
3299# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3300#endif
3301
3302
3303/** @name Misc Worker Functions.
3304 * @{
3305 */
3306
3307/**
3308 * Gets the exception class for the specified exception vector.
3309 *
3310 * @returns The class of the specified exception.
3311 * @param uVector The exception vector.
3312 */
3313IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3314{
3315 Assert(uVector <= X86_XCPT_LAST);
3316 switch (uVector)
3317 {
3318 case X86_XCPT_DE:
3319 case X86_XCPT_TS:
3320 case X86_XCPT_NP:
3321 case X86_XCPT_SS:
3322 case X86_XCPT_GP:
3323 case X86_XCPT_SX: /* AMD only */
3324 return IEMXCPTCLASS_CONTRIBUTORY;
3325
3326 case X86_XCPT_PF:
3327 case X86_XCPT_VE: /* Intel only */
3328 return IEMXCPTCLASS_PAGE_FAULT;
3329
3330 case X86_XCPT_DF:
3331 return IEMXCPTCLASS_DOUBLE_FAULT;
3332 }
3333 return IEMXCPTCLASS_BENIGN;
3334}
3335
3336
3337/**
3338 * Evaluates how to handle an exception caused during delivery of another event
3339 * (exception / interrupt).
3340 *
3341 * @returns How to handle the recursive exception.
3342 * @param pVCpu The cross context virtual CPU structure of the
3343 * calling thread.
3344 * @param fPrevFlags The flags of the previous event.
3345 * @param uPrevVector The vector of the previous event.
3346 * @param fCurFlags The flags of the current exception.
3347 * @param uCurVector The vector of the current exception.
3348 * @param pfXcptRaiseInfo Where to store additional information about the
3349 * exception condition. Optional.
3350 */
3351VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3352 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3353{
3354 /*
3355 * Only CPU exceptions can be raised while delivering other events; software interrupt
3356 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3357 */
3358 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3359 Assert(pVCpu); RT_NOREF(pVCpu);
3360 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3361
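    /*
     * Summary of the cases handled below (the exception class rules shared by Intel and AMD):
     *   - #PF followed by #PF or a contributory exception -> #DF
     *   - contributory followed by contributory           -> #DF
     *   - #DF followed by a contributory exception or #PF -> triple fault
     *   - anything else                                   -> raise the current exception normally
     * The NMI and recursive #AC cases below only add extra info flags / special handling.
     */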
3362 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3363 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3364 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3365 {
3366 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3367 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3368 {
3369 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3370 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3371 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3372 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3373 {
3374 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3375 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3376 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3377 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3378 uCurVector, pVCpu->cpum.GstCtx.cr2));
3379 }
3380 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3381 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3382 {
3383 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3384 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3385 }
3386 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3387 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3388 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3389 {
3390 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3391 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3392 }
3393 }
3394 else
3395 {
3396 if (uPrevVector == X86_XCPT_NMI)
3397 {
3398 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3399 if (uCurVector == X86_XCPT_PF)
3400 {
3401 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3402 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3403 }
3404 }
3405 else if ( uPrevVector == X86_XCPT_AC
3406 && uCurVector == X86_XCPT_AC)
3407 {
3408 enmRaise = IEMXCPTRAISE_CPU_HANG;
3409 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3410 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3411 }
3412 }
3413 }
3414 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3415 {
3416 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3417 if (uCurVector == X86_XCPT_PF)
3418 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3419 }
3420 else
3421 {
3422 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3423 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3424 }
3425
3426 if (pfXcptRaiseInfo)
3427 *pfXcptRaiseInfo = fRaiseInfo;
3428 return enmRaise;
3429}
3430
3431
3432/**
3433 * Enters the CPU shutdown state initiated by a triple fault or other
3434 * unrecoverable conditions.
3435 *
3436 * @returns Strict VBox status code.
3437 * @param pVCpu The cross context virtual CPU structure of the
3438 * calling thread.
3439 */
3440IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3441{
3442 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3443 {
3444 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3445 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3446 }
3447
3448 RT_NOREF(pVCpu);
3449 return VINF_EM_TRIPLE_FAULT;
3450}
3451
3452
3453/**
3454 * Validates a new SS segment.
3455 *
3456 * @returns VBox strict status code.
3457 * @param pVCpu The cross context virtual CPU structure of the
3458 * calling thread.
3459 * @param NewSS The new SS selector.
3460 * @param uCpl The CPL to load the stack for.
3461 * @param pDesc Where to return the descriptor.
3462 */
3463IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3464{
3465 /* Null selectors are not allowed (we're not called for dispatching
3466 interrupts with SS=0 in long mode). */
3467 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3468 {
3469        Log(("iemMiscValidateNewSS: %#x - null selector -> #TS(0)\n", NewSS));
3470 return iemRaiseTaskSwitchFault0(pVCpu);
3471 }
3472
3473 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3474 if ((NewSS & X86_SEL_RPL) != uCpl)
3475 {
3476        Log(("iemMiscValidateNewSS: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
3477 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3478 }
3479
3480 /*
3481 * Read the descriptor.
3482 */
3483 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3484 if (rcStrict != VINF_SUCCESS)
3485 return rcStrict;
3486
3487 /*
3488 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3489 */
3490 if (!pDesc->Legacy.Gen.u1DescType)
3491 {
3492        Log(("iemMiscValidateNewSS: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3493 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3494 }
3495
3496 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3497 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3498 {
3499        Log(("iemMiscValidateNewSS: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3500 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3501 }
3502 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3503 {
3504        Log(("iemMiscValidateNewSS: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3505 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3506 }
3507
3508 /* Is it there? */
3509 /** @todo testcase: Is this checked before the canonical / limit check below? */
3510 if (!pDesc->Legacy.Gen.u1Present)
3511 {
3512        Log(("iemMiscValidateNewSS: %#x - segment not present -> #NP\n", NewSS));
3513 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3514 }
3515
3516 return VINF_SUCCESS;
3517}
3518
3519
3520/**
3521 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3522 * not.
3523 *
3524 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3525 */
3526#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3527# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3528#else
3529# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3530#endif
3531
3532/**
3533 * Updates the EFLAGS in the correct manner wrt. PATM.
3534 *
3535 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3536 * @param a_fEfl The new EFLAGS.
3537 */
3538#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3539# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3540#else
3541# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3542#endif
3543
3544
3545/** @} */
3546
3547/** @name Raising Exceptions.
3548 *
3549 * @{
3550 */
3551
3552
3553/**
3554 * Loads the specified stack far pointer from the TSS.
3555 *
3556 * @returns VBox strict status code.
3557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3558 * @param uCpl The CPL to load the stack for.
3559 * @param pSelSS Where to return the new stack segment.
3560 * @param puEsp Where to return the new stack pointer.
3561 */
3562IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3563{
3564 VBOXSTRICTRC rcStrict;
3565 Assert(uCpl < 4);
3566
3567 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3568 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3569 {
3570 /*
3571 * 16-bit TSS (X86TSS16).
3572 */
3573 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3574 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3575 {
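            /* The 16-bit TSS stores SS:SP pairs for CPL 0-2 as consecutive words starting at offset 2;
               the dword fetched below has SP in the low word and SS in the high word. */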
3576 uint32_t off = uCpl * 4 + 2;
3577 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3578 {
3579 /** @todo check actual access pattern here. */
3580 uint32_t u32Tmp = 0; /* gcc maybe... */
3581 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3582 if (rcStrict == VINF_SUCCESS)
3583 {
3584 *puEsp = RT_LOWORD(u32Tmp);
3585 *pSelSS = RT_HIWORD(u32Tmp);
3586 return VINF_SUCCESS;
3587 }
3588 }
3589 else
3590 {
3591 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3592 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3593 }
3594 break;
3595 }
3596
3597 /*
3598 * 32-bit TSS (X86TSS32).
3599 */
3600 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3601 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3602 {
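            /* The 32-bit TSS stores ESP/SS pairs for CPL 0-2 as 8-byte entries starting at offset 4;
               the qword fetched below has ESP in the low dword and SS in the word following it. */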
3603 uint32_t off = uCpl * 8 + 4;
3604 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3605 {
3606                /** @todo check actual access pattern here. */
3607 uint64_t u64Tmp;
3608 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3609 if (rcStrict == VINF_SUCCESS)
3610 {
3611 *puEsp = u64Tmp & UINT32_MAX;
3612 *pSelSS = (RTSEL)(u64Tmp >> 32);
3613 return VINF_SUCCESS;
3614 }
3615 }
3616 else
3617 {
3618            Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3619 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3620 }
3621 break;
3622 }
3623
3624 default:
3625 AssertFailed();
3626 rcStrict = VERR_IEM_IPE_4;
3627 break;
3628 }
3629
3630 *puEsp = 0; /* make gcc happy */
3631 *pSelSS = 0; /* make gcc happy */
3632 return rcStrict;
3633}
3634
3635
3636/**
3637 * Loads the specified stack pointer from the 64-bit TSS.
3638 *
3639 * @returns VBox strict status code.
3640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3641 * @param uCpl The CPL to load the stack for.
3642 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3643 * @param puRsp Where to return the new stack pointer.
3644 */
3645IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3646{
3647 Assert(uCpl < 4);
3648 Assert(uIst < 8);
3649 *puRsp = 0; /* make gcc happy */
3650
3651 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3652 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3653
3654 uint32_t off;
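    /* IST1..IST7 and RSP0..RSP2 are 8-byte fields in the 64-bit TSS; select by IST index when non-zero, otherwise by CPL. */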
3655 if (uIst)
3656 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3657 else
3658 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3659 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3660 {
3661 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3662 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3663 }
3664
3665 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3666}
3667
3668
3669/**
3670 * Adjusts the CPU state according to the exception being raised.
3671 *
3672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3673 * @param u8Vector The exception that has been raised.
3674 */
3675DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3676{
3677 switch (u8Vector)
3678 {
3679 case X86_XCPT_DB:
3680 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3681 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3682 break;
3683 /** @todo Read the AMD and Intel exception reference... */
3684 }
3685}
3686
3687
3688/**
3689 * Implements exceptions and interrupts for real mode.
3690 *
3691 * @returns VBox strict status code.
3692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3693 * @param cbInstr The number of bytes to offset rIP by in the return
3694 * address.
3695 * @param u8Vector The interrupt / exception vector number.
3696 * @param fFlags The flags.
3697 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3698 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3699 */
3700IEM_STATIC VBOXSTRICTRC
3701iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3702 uint8_t cbInstr,
3703 uint8_t u8Vector,
3704 uint32_t fFlags,
3705 uint16_t uErr,
3706 uint64_t uCr2)
3707{
3708 NOREF(uErr); NOREF(uCr2);
3709 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3710
3711 /*
3712 * Read the IDT entry.
3713 */
3714 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3715 {
3716 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3717 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3718 }
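    /* A real-mode IDT entry is a 4-byte far pointer (offset:segment), hence the *4 scaling above and the RTFAR16 fetch below. */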
3719 RTFAR16 Idte;
3720 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3721 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3722 {
3723 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3724 return rcStrict;
3725 }
3726
3727 /*
3728 * Push the stack frame.
3729 */
3730 uint16_t *pu16Frame;
3731 uint64_t uNewRsp;
3732 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3733 if (rcStrict != VINF_SUCCESS)
3734 return rcStrict;
3735
3736 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3737#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3738 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
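    /* On 8086/80186 class CPUs the top four FLAGS bits read as set, so reflect that in the pushed image. */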
3739 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3740 fEfl |= UINT16_C(0xf000);
3741#endif
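    /* The 6-byte frame is written with FLAGS at the highest word, then CS, then the return IP;
       for software interrupts the return IP points past the INT instruction. */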
3742 pu16Frame[2] = (uint16_t)fEfl;
3743 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3744 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3745 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3746 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3747 return rcStrict;
3748
3749 /*
3750 * Load the vector address into cs:ip and make exception specific state
3751 * adjustments.
3752 */
3753 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3754 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3755 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3756 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3757 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3758 pVCpu->cpum.GstCtx.rip = Idte.off;
3759 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3760 IEMMISC_SET_EFL(pVCpu, fEfl);
3761
3762 /** @todo do we actually do this in real mode? */
3763 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3764 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3765
3766 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3767}
3768
3769
3770/**
3771 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3772 *
3773 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3774 * @param pSReg Pointer to the segment register.
3775 */
3776IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3777{
3778 pSReg->Sel = 0;
3779 pSReg->ValidSel = 0;
3780 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3781 {
3782 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
3783 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3784 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3785 }
3786 else
3787 {
3788 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3789 /** @todo check this on AMD-V */
3790 pSReg->u64Base = 0;
3791 pSReg->u32Limit = 0;
3792 }
3793}
3794
3795
3796/**
3797 * Loads a segment selector during a task switch in V8086 mode.
3798 *
3799 * @param pSReg Pointer to the segment register.
3800 * @param uSel The selector value to load.
3801 */
3802IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3803{
3804 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3805 pSReg->Sel = uSel;
3806 pSReg->ValidSel = uSel;
3807 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3808 pSReg->u64Base = uSel << 4;
3809 pSReg->u32Limit = 0xffff;
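    /* 0xf3 = present, DPL 3, accessed read/write data segment - the fixed attributes of a V8086 mode segment. */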
3810 pSReg->Attr.u = 0xf3;
3811}
3812
3813
3814/**
3815 * Loads a NULL data selector into a selector register, both the hidden and
3816 * visible parts, in protected mode.
3817 *
3818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3819 * @param pSReg Pointer to the segment register.
3820 * @param uRpl The RPL.
3821 */
3822IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3823{
3824    /** @todo Testcase: write a testcase checking what happens when loading a NULL
3825 * data selector in protected mode. */
3826 pSReg->Sel = uRpl;
3827 pSReg->ValidSel = uRpl;
3828 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3829 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3830 {
3831 /* VT-x (Intel 3960x) observed doing something like this. */
3832 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3833 pSReg->u32Limit = UINT32_MAX;
3834 pSReg->u64Base = 0;
3835 }
3836 else
3837 {
3838 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3839 pSReg->u32Limit = 0;
3840 pSReg->u64Base = 0;
3841 }
3842}
3843
3844
3845/**
3846 * Loads a segment selector during a task switch in protected mode.
3847 *
3848 * In this task switch scenario, we would throw \#TS exceptions rather than
3849 * \#GPs.
3850 *
3851 * @returns VBox strict status code.
3852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3853 * @param pSReg Pointer to the segment register.
3854 * @param uSel The new selector value.
3855 *
3856 * @remarks This does _not_ handle CS or SS.
3857 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3858 */
3859IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3860{
3861 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3862
3863 /* Null data selector. */
3864 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3865 {
3866 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3867 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3868 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3869 return VINF_SUCCESS;
3870 }
3871
3872 /* Fetch the descriptor. */
3873 IEMSELDESC Desc;
3874 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3875 if (rcStrict != VINF_SUCCESS)
3876 {
3877 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3878 VBOXSTRICTRC_VAL(rcStrict)));
3879 return rcStrict;
3880 }
3881
3882 /* Must be a data segment or readable code segment. */
3883 if ( !Desc.Legacy.Gen.u1DescType
3884 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3885 {
3886 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3887 Desc.Legacy.Gen.u4Type));
3888 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3889 }
3890
3891 /* Check privileges for data segments and non-conforming code segments. */
3892 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3893 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3894 {
3895 /* The RPL and the new CPL must be less than or equal to the DPL. */
3896 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3897 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3898 {
3899 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3900 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3901 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3902 }
3903 }
3904
3905 /* Is it there? */
3906 if (!Desc.Legacy.Gen.u1Present)
3907 {
3908 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3909 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3910 }
3911
3912 /* The base and limit. */
3913 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3914 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3915
3916 /*
3917 * Ok, everything checked out fine. Now set the accessed bit before
3918 * committing the result into the registers.
3919 */
3920 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3921 {
3922 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3923 if (rcStrict != VINF_SUCCESS)
3924 return rcStrict;
3925 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3926 }
3927
3928 /* Commit */
3929 pSReg->Sel = uSel;
3930 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3931 pSReg->u32Limit = cbLimit;
3932 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3933 pSReg->ValidSel = uSel;
3934 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3935 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3936 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3937
3938 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3939 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3940 return VINF_SUCCESS;
3941}
3942
3943
3944/**
3945 * Performs a task switch.
3946 *
3947 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3948 * caller is responsible for performing the necessary checks (like DPL, TSS
3949 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3950 * reference for JMP, CALL, IRET.
3951 *
3952 * If the task switch is due to a software interrupt or hardware exception,
3953 * the caller is responsible for validating the TSS selector and descriptor. See
3954 * Intel Instruction reference for INT n.
3955 *
3956 * @returns VBox strict status code.
3957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3958 * @param enmTaskSwitch What caused this task switch.
3959 * @param uNextEip The EIP effective after the task switch.
3960 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3961 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3962 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3963 * @param SelTSS The TSS selector of the new task.
3964 * @param pNewDescTSS Pointer to the new TSS descriptor.
3965 */
3966IEM_STATIC VBOXSTRICTRC
3967iemTaskSwitch(PVMCPU pVCpu,
3968 IEMTASKSWITCH enmTaskSwitch,
3969 uint32_t uNextEip,
3970 uint32_t fFlags,
3971 uint16_t uErr,
3972 uint64_t uCr2,
3973 RTSEL SelTSS,
3974 PIEMSELDESC pNewDescTSS)
3975{
3976 Assert(!IEM_IS_REAL_MODE(pVCpu));
3977 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3978 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3979
3980 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3981 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3982 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3983 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3984 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3985
3986 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3987 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3988
3989 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3990 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
3991
3992 /* Update CR2 in case it's a page-fault. */
3993 /** @todo This should probably be done much earlier in IEM/PGM. See
3994 * @bugref{5653#c49}. */
3995 if (fFlags & IEM_XCPT_FLAGS_CR2)
3996 pVCpu->cpum.GstCtx.cr2 = uCr2;
3997
3998 /*
3999 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4000 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4001 */
4002 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4003 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4004 if (uNewTSSLimit < uNewTSSLimitMin)
4005 {
4006 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4007 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4008 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4009 }
4010
4011 /*
4012 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4013 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4014 */
4015 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4016 {
4017 uint32_t const uExitInfo1 = SelTSS;
4018 uint32_t uExitInfo2 = uErr;
4019 switch (enmTaskSwitch)
4020 {
4021 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4022 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4023 default: break;
4024 }
4025 if (fFlags & IEM_XCPT_FLAGS_ERR)
4026 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4027 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4028 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4029
4030 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4031 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4032 RT_NOREF2(uExitInfo1, uExitInfo2);
4033 }
4034 /** @todo Nested-VMX task-switch intercept. */
4035
4036 /*
4037 * Check the current TSS limit. The last written byte to the current TSS during the
4038 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4039 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4040 *
4041 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4042 * end up with smaller than "legal" TSS limits.
4043 */
4044 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4045 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4046 if (uCurTSSLimit < uCurTSSLimitMin)
4047 {
4048 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4049 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4050 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4051 }
4052
4053 /*
4054 * Verify that the new TSS can be accessed and map it. Map only the required contents
4055 * and not the entire TSS.
4056 */
4057 void *pvNewTSS;
4058 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4059 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4060 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
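    /* That is 0x2C bytes for a 16-bit TSS and 0x68 bytes for a 32-bit one (the respective limit minimum plus one). */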
4061 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4062 * not perform correct translation if this happens. See Intel spec. 7.2.1
4063 * "Task-State Segment" */
4064 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4065 if (rcStrict != VINF_SUCCESS)
4066 {
4067 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4068 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4069 return rcStrict;
4070 }
4071
4072 /*
4073 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4074 */
4075 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4076 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4077 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4078 {
4079 PX86DESC pDescCurTSS;
4080 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4081 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4082 if (rcStrict != VINF_SUCCESS)
4083 {
4084 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4085 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4086 return rcStrict;
4087 }
4088
4089 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4090 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4091 if (rcStrict != VINF_SUCCESS)
4092 {
4093 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4094 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4095 return rcStrict;
4096 }
4097
4098 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4099 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4100 {
4101 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4102 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4103 u32EFlags &= ~X86_EFL_NT;
4104 }
4105 }
4106
4107 /*
4108 * Save the CPU state into the current TSS.
4109 */
4110 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4111 if (GCPtrNewTSS == GCPtrCurTSS)
4112 {
4113 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4114 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4115 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ldtr.Sel));
4116 }
4117 if (fIsNewTSS386)
4118 {
4119 /*
4120 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4121 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4122 */
4123 void *pvCurTSS32;
4124 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4125 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4126 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4127 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4128 if (rcStrict != VINF_SUCCESS)
4129 {
4130 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4131 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4132 return rcStrict;
4133 }
4134
4135        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4136 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4137 pCurTSS32->eip = uNextEip;
4138 pCurTSS32->eflags = u32EFlags;
4139 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4140 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4141 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4142 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4143 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4144 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4145 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4146 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4147 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4148 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4149 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4150 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4151 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4152 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4153
4154 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4155 if (rcStrict != VINF_SUCCESS)
4156 {
4157 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4158 VBOXSTRICTRC_VAL(rcStrict)));
4159 return rcStrict;
4160 }
4161 }
4162 else
4163 {
4164 /*
4165 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4166 */
4167 void *pvCurTSS16;
4168 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4169 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4170 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4171 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4172 if (rcStrict != VINF_SUCCESS)
4173 {
4174 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4175 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4176 return rcStrict;
4177 }
4178
4179        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4180 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4181 pCurTSS16->ip = uNextEip;
4182 pCurTSS16->flags = u32EFlags;
4183 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4184 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4185 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4186 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4187 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4188 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4189 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4190 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4191 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4192 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4193 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4194 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4195
4196 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4197 if (rcStrict != VINF_SUCCESS)
4198 {
4199 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4200 VBOXSTRICTRC_VAL(rcStrict)));
4201 return rcStrict;
4202 }
4203 }
4204
4205 /*
4206 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4207 */
4208 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4209 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4210 {
4211 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4212 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4213 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4214 }
4215
4216 /*
4217 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4218 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4219 */
4220 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4221 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4222 bool fNewDebugTrap;
4223 if (fIsNewTSS386)
4224 {
4225 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4226 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4227 uNewEip = pNewTSS32->eip;
4228 uNewEflags = pNewTSS32->eflags;
4229 uNewEax = pNewTSS32->eax;
4230 uNewEcx = pNewTSS32->ecx;
4231 uNewEdx = pNewTSS32->edx;
4232 uNewEbx = pNewTSS32->ebx;
4233 uNewEsp = pNewTSS32->esp;
4234 uNewEbp = pNewTSS32->ebp;
4235 uNewEsi = pNewTSS32->esi;
4236 uNewEdi = pNewTSS32->edi;
4237 uNewES = pNewTSS32->es;
4238 uNewCS = pNewTSS32->cs;
4239 uNewSS = pNewTSS32->ss;
4240 uNewDS = pNewTSS32->ds;
4241 uNewFS = pNewTSS32->fs;
4242 uNewGS = pNewTSS32->gs;
4243 uNewLdt = pNewTSS32->selLdt;
4244 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4245 }
4246 else
4247 {
4248 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4249 uNewCr3 = 0;
4250 uNewEip = pNewTSS16->ip;
4251 uNewEflags = pNewTSS16->flags;
4252 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4253 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4254 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4255 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4256 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4257 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4258 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4259 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4260 uNewES = pNewTSS16->es;
4261 uNewCS = pNewTSS16->cs;
4262 uNewSS = pNewTSS16->ss;
4263 uNewDS = pNewTSS16->ds;
4264 uNewFS = 0;
4265 uNewGS = 0;
4266 uNewLdt = pNewTSS16->selLdt;
4267 fNewDebugTrap = false;
4268 }
4269
4270 if (GCPtrNewTSS == GCPtrCurTSS)
4271 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4272 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4273
4274 /*
4275 * We're done accessing the new TSS.
4276 */
4277 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4278 if (rcStrict != VINF_SUCCESS)
4279 {
4280 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4281 return rcStrict;
4282 }
4283
4284 /*
4285 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4286 */
4287 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4288 {
4289 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4290 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4291 if (rcStrict != VINF_SUCCESS)
4292 {
4293 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4294 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4295 return rcStrict;
4296 }
4297
4298 /* Check that the descriptor indicates the new TSS is available (not busy). */
4299 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4300 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4301 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4302
4303 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4304 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4305 if (rcStrict != VINF_SUCCESS)
4306 {
4307 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4308 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4309 return rcStrict;
4310 }
4311 }
4312
4313 /*
4314 * From this point on, we're technically in the new task. We will defer exceptions
4315 * until the completion of the task switch but before executing any instructions in the new task.
4316 */
4317 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4318 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4319 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4320 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4321 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4322 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4323 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4324
4325 /* Set the busy bit in TR. */
4326 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4327 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4328 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4329 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4330 {
4331 uNewEflags |= X86_EFL_NT;
4332 }
4333
4334 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4335 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4336 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4337
4338 pVCpu->cpum.GstCtx.eip = uNewEip;
4339 pVCpu->cpum.GstCtx.eax = uNewEax;
4340 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4341 pVCpu->cpum.GstCtx.edx = uNewEdx;
4342 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4343 pVCpu->cpum.GstCtx.esp = uNewEsp;
4344 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4345 pVCpu->cpum.GstCtx.esi = uNewEsi;
4346 pVCpu->cpum.GstCtx.edi = uNewEdi;
4347
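    /* Keep only the architecturally live EFLAGS bits from the TSS image and force the reserved always-one bit (bit 1). */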
4348 uNewEflags &= X86_EFL_LIVE_MASK;
4349 uNewEflags |= X86_EFL_RA1_MASK;
4350 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4351
4352 /*
4353 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4354 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4355 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4356 */
4357 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4358 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4359
4360 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4361 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4362
4363 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4364 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4365
4366 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4367 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4368
4369 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4370 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4371
4372 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4373 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4374 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4375
4376 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4377 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4378 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4379 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4380
4381 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4382 {
4383 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4384 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4385 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4386 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4387 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4388 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4389 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4390 }
4391
4392 /*
4393 * Switch CR3 for the new task.
4394 */
4395 if ( fIsNewTSS386
4396 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4397 {
4398 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4399 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4400 AssertRCSuccessReturn(rc, rc);
4401
4402 /* Inform PGM. */
4403 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4404 AssertRCReturn(rc, rc);
4405 /* ignore informational status codes */
4406
4407 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4408 }
4409
4410 /*
4411 * Switch LDTR for the new task.
4412 */
4413 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4414 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4415 else
4416 {
4417 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4418
4419 IEMSELDESC DescNewLdt;
4420 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4421 if (rcStrict != VINF_SUCCESS)
4422 {
4423 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4424 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4425 return rcStrict;
4426 }
4427 if ( !DescNewLdt.Legacy.Gen.u1Present
4428 || DescNewLdt.Legacy.Gen.u1DescType
4429 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4430 {
4431 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4432 uNewLdt, DescNewLdt.Legacy.u));
4433 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4434 }
4435
4436 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4437 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4438 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4439 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4440 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4441 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4442 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4443 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4444 }
4445
4446 IEMSELDESC DescSS;
4447 if (IEM_IS_V86_MODE(pVCpu))
4448 {
4449 pVCpu->iem.s.uCpl = 3;
4450 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4451 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4452 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4453 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4454 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4455 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4456
4457 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4458 DescSS.Legacy.u = 0;
4459 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4460 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4461 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4462 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4463 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4464 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4465 DescSS.Legacy.Gen.u2Dpl = 3;
4466 }
4467 else
4468 {
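        /* Outside V8086 mode the new CPL is taken from the RPL of the CS selector found in the new TSS. */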
4469 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4470
4471 /*
4472 * Load the stack segment for the new task.
4473 */
4474 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4475 {
4476 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4477 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4478 }
4479
4480 /* Fetch the descriptor. */
4481 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4482 if (rcStrict != VINF_SUCCESS)
4483 {
4484 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4485 VBOXSTRICTRC_VAL(rcStrict)));
4486 return rcStrict;
4487 }
4488
4489 /* SS must be a data segment and writable. */
4490 if ( !DescSS.Legacy.Gen.u1DescType
4491 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4492 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4493 {
4494 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4495 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4496 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4497 }
4498
4499 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4500 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4501 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4502 {
4503 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4504 uNewCpl));
4505 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4506 }
4507
4508 /* Is it there? */
4509 if (!DescSS.Legacy.Gen.u1Present)
4510 {
4511 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4512 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4513 }
4514
4515 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4516 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4517
4518 /* Set the accessed bit before committing the result into SS. */
4519 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4520 {
4521 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4522 if (rcStrict != VINF_SUCCESS)
4523 return rcStrict;
4524 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4525 }
4526
4527 /* Commit SS. */
4528 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4529 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4530 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4531 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4532 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4533 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4534 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4535
4536 /* CPL has changed, update IEM before loading rest of segments. */
4537 pVCpu->iem.s.uCpl = uNewCpl;
4538
4539 /*
4540 * Load the data segments for the new task.
4541 */
4542 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4543 if (rcStrict != VINF_SUCCESS)
4544 return rcStrict;
4545 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4546 if (rcStrict != VINF_SUCCESS)
4547 return rcStrict;
4548 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4549 if (rcStrict != VINF_SUCCESS)
4550 return rcStrict;
4551 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4552 if (rcStrict != VINF_SUCCESS)
4553 return rcStrict;
4554
4555 /*
4556 * Load the code segment for the new task.
4557 */
4558 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4559 {
4560 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4561 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4562 }
4563
4564 /* Fetch the descriptor. */
4565 IEMSELDESC DescCS;
4566 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4567 if (rcStrict != VINF_SUCCESS)
4568 {
4569 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4570 return rcStrict;
4571 }
4572
4573 /* CS must be a code segment. */
4574 if ( !DescCS.Legacy.Gen.u1DescType
4575 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4576 {
4577 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4578 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4579 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4580 }
4581
4582 /* For conforming CS, DPL must be less than or equal to the RPL. */
4583 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4584 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4585 {
4586            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4587 DescCS.Legacy.Gen.u2Dpl));
4588 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4589 }
4590
4591 /* For non-conforming CS, DPL must match RPL. */
4592 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4593 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4594 {
4595 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4596 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4597 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4598 }
4599
4600 /* Is it there? */
4601 if (!DescCS.Legacy.Gen.u1Present)
4602 {
4603 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4604 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4605 }
4606
4607 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4608 u64Base = X86DESC_BASE(&DescCS.Legacy);
4609
4610 /* Set the accessed bit before committing the result into CS. */
4611 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4612 {
4613 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4614 if (rcStrict != VINF_SUCCESS)
4615 return rcStrict;
4616 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4617 }
4618
4619 /* Commit CS. */
4620 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4621 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4622 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4623 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4624 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4625 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4626 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4627 }
4628
4629 /** @todo Debug trap. */
4630 if (fIsNewTSS386 && fNewDebugTrap)
4631 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4632
4633 /*
4634 * Construct the error code masks based on what caused this task switch.
4635 * See Intel Instruction reference for INT.
4636 */
4637 uint16_t uExt;
4638 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4639 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4640 {
4641 uExt = 1;
4642 }
4643 else
4644 uExt = 0;
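    /* Note: uExt becomes bit 0 (the EXT bit) of any error code raised while
       completing the switch (the #SS(EXT)/#GP(EXT) cases below), indicating that
       the fault occurred while delivering an event external to the program. */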
4645
4646 /*
4647 * Push any error code on to the new stack.
4648 */
4649 if (fFlags & IEM_XCPT_FLAGS_ERR)
4650 {
4651 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4652 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4653 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4654
4655 /* Check that there is sufficient space on the stack. */
4656 /** @todo Factor out segment limit checking for normal/expand down segments
4657 * into a separate function. */
4658 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4659 {
4660 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4661 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4662 {
4663 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4664 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4665 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4666 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4667 }
4668 }
4669 else
4670 {
4671 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4672 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4673 {
4674 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4675 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4676 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4677 }
4678 }
4679
4680
4681 if (fIsNewTSS386)
4682 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4683 else
4684 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4685 if (rcStrict != VINF_SUCCESS)
4686 {
4687 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4688 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4689 return rcStrict;
4690 }
4691 }
4692
4693 /* Check the new EIP against the new CS limit. */
4694 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4695 {
4696 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4697 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4698 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4699 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4700 }
4701
4702 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.ss.Sel));
4703 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4704}
4705
4706
4707/**
4708 * Implements exceptions and interrupts for protected mode.
4709 *
4710 * @returns VBox strict status code.
4711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4712 * @param cbInstr The number of bytes to offset rIP by in the return
4713 * address.
4714 * @param u8Vector The interrupt / exception vector number.
4715 * @param fFlags The flags.
4716 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4717 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4718 */
4719IEM_STATIC VBOXSTRICTRC
4720iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4721 uint8_t cbInstr,
4722 uint8_t u8Vector,
4723 uint32_t fFlags,
4724 uint16_t uErr,
4725 uint64_t uCr2)
4726{
4727 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4728
4729 /*
4730 * Read the IDT entry.
4731 */
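    /* Protected-mode IDT entries are 8 bytes each, so the descriptor for this
       vector lives at IDTR.base + 8 * vector and must fit within IDTR.limit. */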
4732 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4733 {
4734 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4735 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4736 }
4737 X86DESC Idte;
4738 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4739 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4740 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4741 {
4742 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4743 return rcStrict;
4744 }
4745 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4746 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4747 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4748
4749 /*
4750 * Check the descriptor type, DPL and such.
4751 * ASSUMES this is done in the same order as described for call-gate calls.
4752 */
4753 if (Idte.Gate.u1DescType)
4754 {
4755 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4756 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4757 }
4758 bool fTaskGate = false;
4759 uint8_t f32BitGate = true;
4760 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4761 switch (Idte.Gate.u4Type)
4762 {
4763 case X86_SEL_TYPE_SYS_UNDEFINED:
4764 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4765 case X86_SEL_TYPE_SYS_LDT:
4766 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4767 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4768 case X86_SEL_TYPE_SYS_UNDEFINED2:
4769 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4770 case X86_SEL_TYPE_SYS_UNDEFINED3:
4771 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4772 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4773 case X86_SEL_TYPE_SYS_UNDEFINED4:
4774 {
4775 /** @todo check what actually happens when the type is wrong...
4776 * esp. call gates. */
4777 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4778 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4779 }
4780
4781 case X86_SEL_TYPE_SYS_286_INT_GATE:
4782 f32BitGate = false;
4783 RT_FALL_THRU();
4784 case X86_SEL_TYPE_SYS_386_INT_GATE:
4785 fEflToClear |= X86_EFL_IF;
4786 break;
4787
4788 case X86_SEL_TYPE_SYS_TASK_GATE:
4789 fTaskGate = true;
4790#ifndef IEM_IMPLEMENTS_TASKSWITCH
4791 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4792#endif
4793 break;
4794
4795 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4796 f32BitGate = false;
            RT_FALL_THRU();
4797 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4798 break;
4799
4800 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4801 }
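    /* Summary of the switch above: task gates are dispatched via a full task
       switch below, interrupt gates additionally clear EFLAGS.IF on entry, trap
       gates leave IF untouched, and the 286 gate variants use 16-bit stack frames. */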
4802
4803 /* Check DPL against CPL if applicable. */
4804 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4805 {
4806 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4807 {
4808 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4809 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4810 }
4811 }
4812
4813 /* Is it there? */
4814 if (!Idte.Gate.u1Present)
4815 {
4816 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4817 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4818 }
4819
4820 /* Is it a task-gate? */
4821 if (fTaskGate)
4822 {
4823 /*
4824 * Construct the error code masks based on what caused this task switch.
4825 * See Intel Instruction reference for INT.
4826 */
4827 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4828 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4829 RTSEL SelTSS = Idte.Gate.u16Sel;
4830
4831 /*
4832 * Fetch the TSS descriptor in the GDT.
4833 */
4834 IEMSELDESC DescTSS;
4835 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4836 if (rcStrict != VINF_SUCCESS)
4837 {
4838 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4839 VBOXSTRICTRC_VAL(rcStrict)));
4840 return rcStrict;
4841 }
4842
4843 /* The TSS descriptor must be a system segment and be available (not busy). */
4844 if ( DescTSS.Legacy.Gen.u1DescType
4845 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4846 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4847 {
4848 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4849 u8Vector, SelTSS, DescTSS.Legacy.au64));
4850 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4851 }
4852
4853 /* The TSS must be present. */
4854 if (!DescTSS.Legacy.Gen.u1Present)
4855 {
4856 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4857 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4858 }
4859
4860 /* Do the actual task switch. */
4861 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT, pVCpu->cpum.GstCtx.eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4862 }
4863
4864 /* A null CS is bad. */
4865 RTSEL NewCS = Idte.Gate.u16Sel;
4866 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4867 {
4868 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4869 return iemRaiseGeneralProtectionFault0(pVCpu);
4870 }
4871
4872 /* Fetch the descriptor for the new CS. */
4873 IEMSELDESC DescCS;
4874 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4875 if (rcStrict != VINF_SUCCESS)
4876 {
4877 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4878 return rcStrict;
4879 }
4880
4881 /* Must be a code segment. */
4882 if (!DescCS.Legacy.Gen.u1DescType)
4883 {
4884 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4885 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4886 }
4887 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4888 {
4889 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4890 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4891 }
4892
4893 /* Don't allow lowering the privilege level. */
4894 /** @todo Does the lowering of privileges apply to software interrupts
4895 * only? This has bearings on the more-privileged or
4896 * same-privilege stack behavior further down. A testcase would
4897 * be nice. */
4898 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4899 {
4900 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4901 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4902 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4903 }
4904
4905 /* Make sure the selector is present. */
4906 if (!DescCS.Legacy.Gen.u1Present)
4907 {
4908 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4909 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4910 }
4911
4912 /* Check the new EIP against the new CS limit. */
4913 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4914 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4915 ? Idte.Gate.u16OffsetLow
4916 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4917 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4918 if (uNewEip > cbLimitCS)
4919 {
4920 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4921 u8Vector, uNewEip, cbLimitCS, NewCS));
4922 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4923 }
4924 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4925
4926 /* Calc the flag image to push. */
4927 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4928 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4929 fEfl &= ~X86_EFL_RF;
4930 else
4931 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4932
4933 /* From V8086 mode only go to CPL 0. */
4934 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4935 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4936 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4937 {
4938 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4939 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4940 }
4941
4942 /*
4943 * If the privilege level changes, we need to get a new stack from the TSS.
4944 * This in turns means validating the new SS and ESP...
4945 */
4946 if (uNewCpl != pVCpu->iem.s.uCpl)
4947 {
4948 RTSEL NewSS;
4949 uint32_t uNewEsp;
4950 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
4951 if (rcStrict != VINF_SUCCESS)
4952 return rcStrict;
4953
4954 IEMSELDESC DescSS;
4955 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
4956 if (rcStrict != VINF_SUCCESS)
4957 return rcStrict;
4958 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4959 if (!DescSS.Legacy.Gen.u1DefBig)
4960 {
4961 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4962 uNewEsp = (uint16_t)uNewEsp;
4963 }
4964
4965 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4966
4967 /* Check that there is sufficient space for the stack frame. */
4968 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4969 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4970 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4971 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
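            /* I.e. 5 entries (EIP, CS, EFLAGS, ESP, SS), or 6 with an error code, plus
               ES/DS/FS/GS when interrupting V8086 code; each entry is 2 bytes for a
               16-bit gate and 4 bytes for a 32-bit gate. */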
4972
4973 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4974 {
4975 if ( uNewEsp - 1 > cbLimitSS
4976 || uNewEsp < cbStackFrame)
4977 {
4978 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4979 u8Vector, NewSS, uNewEsp, cbStackFrame));
4980 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4981 }
4982 }
4983 else
4984 {
4985 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4986 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4987 {
4988 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4989 u8Vector, NewSS, uNewEsp, cbStackFrame));
4990 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4991 }
4992 }
4993
4994 /*
4995 * Start making changes.
4996 */
4997
4998 /* Set the new CPL so that stack accesses use it. */
4999 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5000 pVCpu->iem.s.uCpl = uNewCpl;
5001
5002 /* Create the stack frame. */
5003 RTPTRUNION uStackFrame;
5004 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5005 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5006 if (rcStrict != VINF_SUCCESS)
5007 return rcStrict;
5008 void * const pvStackFrame = uStackFrame.pv;
5009 if (f32BitGate)
5010 {
5011 if (fFlags & IEM_XCPT_FLAGS_ERR)
5012 *uStackFrame.pu32++ = uErr;
5013 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5014 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5015 uStackFrame.pu32[2] = fEfl;
5016 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5017 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5018 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5019 if (fEfl & X86_EFL_VM)
5020 {
5021 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5022 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5023 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5024 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5025 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5026 }
5027 }
5028 else
5029 {
5030 if (fFlags & IEM_XCPT_FLAGS_ERR)
5031 *uStackFrame.pu16++ = uErr;
5032 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5033 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5034 uStackFrame.pu16[2] = fEfl;
5035 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5036 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5037 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5038 if (fEfl & X86_EFL_VM)
5039 {
5040 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5041 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5042 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5043 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5044 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5045 }
5046 }
5047 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5048 if (rcStrict != VINF_SUCCESS)
5049 return rcStrict;
5050
5051 /* Mark the selectors 'accessed' (hope this is the correct time). */
5052 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5053 * after pushing the stack frame? (Write protect the gdt + stack to
5054 * find out.) */
5055 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5056 {
5057 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5058 if (rcStrict != VINF_SUCCESS)
5059 return rcStrict;
5060 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5061 }
5062
5063 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5064 {
5065 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5066 if (rcStrict != VINF_SUCCESS)
5067 return rcStrict;
5068 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5069 }
5070
5071 /*
5072 * Start committing the register changes (joins with the DPL=CPL branch).
5073 */
5074 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5075 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5076 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5077 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5078 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5079 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5080 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5081 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5082 * SP is loaded).
5083 * Need to check the other combinations too:
5084 * - 16-bit TSS, 32-bit handler
5085 * - 32-bit TSS, 16-bit handler */
5086 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5087 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5088 else
5089 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5090
5091 if (fEfl & X86_EFL_VM)
5092 {
5093 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5094 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5095 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5096 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5097 }
5098 }
5099 /*
5100 * Same privilege, no stack change and smaller stack frame.
5101 */
5102 else
5103 {
5104 uint64_t uNewRsp;
5105 RTPTRUNION uStackFrame;
5106 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
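            /* I.e. 3 entries (EIP, CS, EFLAGS), or 4 with an error code, each 2 bytes
               for a 16-bit gate and 4 bytes for a 32-bit gate; no SS:ESP is pushed
               since the stack does not change. */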
5107 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5108 if (rcStrict != VINF_SUCCESS)
5109 return rcStrict;
5110 void * const pvStackFrame = uStackFrame.pv;
5111
5112 if (f32BitGate)
5113 {
5114 if (fFlags & IEM_XCPT_FLAGS_ERR)
5115 *uStackFrame.pu32++ = uErr;
5116 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5117 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5118 uStackFrame.pu32[2] = fEfl;
5119 }
5120 else
5121 {
5122 if (fFlags & IEM_XCPT_FLAGS_ERR)
5123 *uStackFrame.pu16++ = uErr;
5124 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5125 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5126 uStackFrame.pu16[2] = fEfl;
5127 }
5128 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5129 if (rcStrict != VINF_SUCCESS)
5130 return rcStrict;
5131
5132 /* Mark the CS selector as 'accessed'. */
5133 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5134 {
5135 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5136 if (rcStrict != VINF_SUCCESS)
5137 return rcStrict;
5138 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5139 }
5140
5141 /*
5142 * Start committing the register changes (joins with the other branch).
5143 */
5144 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5145 }
5146
5147 /* ... register committing continues. */
5148 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5149 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5150 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5151 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5152 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5153 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5154
5155 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5156 fEfl &= ~fEflToClear;
5157 IEMMISC_SET_EFL(pVCpu, fEfl);
5158
5159 if (fFlags & IEM_XCPT_FLAGS_CR2)
5160 pVCpu->cpum.GstCtx.cr2 = uCr2;
5161
5162 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5163 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5164
5165 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5166}
5167
5168
5169/**
5170 * Implements exceptions and interrupts for long mode.
5171 *
5172 * @returns VBox strict status code.
5173 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5174 * @param cbInstr The number of bytes to offset rIP by in the return
5175 * address.
5176 * @param u8Vector The interrupt / exception vector number.
5177 * @param fFlags The flags.
5178 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5179 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5180 */
5181IEM_STATIC VBOXSTRICTRC
5182iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5183 uint8_t cbInstr,
5184 uint8_t u8Vector,
5185 uint32_t fFlags,
5186 uint16_t uErr,
5187 uint64_t uCr2)
5188{
5189 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5190
5191 /*
5192 * Read the IDT entry.
5193 */
5194 uint16_t offIdt = (uint16_t)u8Vector << 4;
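    /* Long-mode IDT entries are 16 bytes each (hence vector << 4); the descriptor
       is fetched below as two 8-byte system reads. */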
5195 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5196 {
5197 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5198 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5199 }
5200 X86DESC64 Idte;
5201 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5202 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5203 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5204 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5205 {
5206 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5207 return rcStrict;
5208 }
5209 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5210 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5211 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5212
5213 /*
5214 * Check the descriptor type, DPL and such.
5215 * ASSUMES this is done in the same order as described for call-gate calls.
5216 */
5217 if (Idte.Gate.u1DescType)
5218 {
5219 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5220 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5221 }
5222 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5223 switch (Idte.Gate.u4Type)
5224 {
5225 case AMD64_SEL_TYPE_SYS_INT_GATE:
5226 fEflToClear |= X86_EFL_IF;
5227 break;
5228 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5229 break;
5230
5231 default:
5232 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5233 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5234 }
5235
5236 /* Check DPL against CPL if applicable. */
5237 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5238 {
5239 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5240 {
5241 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5242 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5243 }
5244 }
5245
5246 /* Is it there? */
5247 if (!Idte.Gate.u1Present)
5248 {
5249 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5250 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5251 }
5252
5253 /* A null CS is bad. */
5254 RTSEL NewCS = Idte.Gate.u16Sel;
5255 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5256 {
5257 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5258 return iemRaiseGeneralProtectionFault0(pVCpu);
5259 }
5260
5261 /* Fetch the descriptor for the new CS. */
5262 IEMSELDESC DescCS;
5263 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5264 if (rcStrict != VINF_SUCCESS)
5265 {
5266 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5267 return rcStrict;
5268 }
5269
5270 /* Must be a 64-bit code segment. */
5271 if (!DescCS.Long.Gen.u1DescType)
5272 {
5273 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5274 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5275 }
5276 if ( !DescCS.Long.Gen.u1Long
5277 || DescCS.Long.Gen.u1DefBig
5278 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5279 {
5280 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5281 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5282 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5283 }
5284
5285 /* Don't allow lowering the privilege level. For non-conforming CS
5286 selectors, the CS.DPL sets the privilege level the trap/interrupt
5287 handler runs at. For conforming CS selectors, the CPL remains
5288 unchanged, but the CS.DPL must be <= CPL. */
5289 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5290 * when CPU in Ring-0. Result \#GP? */
5291 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5292 {
5293 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5294 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5295 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5296 }
5297
5298
5299 /* Make sure the selector is present. */
5300 if (!DescCS.Legacy.Gen.u1Present)
5301 {
5302 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5303 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5304 }
5305
5306 /* Check that the new RIP is canonical. */
5307 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5308 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5309 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5310 if (!IEM_IS_CANONICAL(uNewRip))
5311 {
5312 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5313 return iemRaiseGeneralProtectionFault0(pVCpu);
5314 }
5315
5316 /*
5317 * If the privilege level changes or if the IST isn't zero, we need to get
5318 * a new stack from the TSS.
5319 */
5320 uint64_t uNewRsp;
5321 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5322 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5323 if ( uNewCpl != pVCpu->iem.s.uCpl
5324 || Idte.Gate.u3IST != 0)
5325 {
5326 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5327 if (rcStrict != VINF_SUCCESS)
5328 return rcStrict;
5329 }
5330 else
5331 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5332 uNewRsp &= ~(uint64_t)0xf;
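    /* In 64-bit mode the stack pointer is aligned down to a 16-byte boundary
       before the interrupt frame is pushed, whether or not a new RSP was loaded
       from the TSS or an IST slot. */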
5333
5334 /*
5335 * Calc the flag image to push.
5336 */
5337 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5338 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5339 fEfl &= ~X86_EFL_RF;
5340 else
5341 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5342
5343 /*
5344 * Start making changes.
5345 */
5346 /* Set the new CPL so that stack accesses use it. */
5347 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5348 pVCpu->iem.s.uCpl = uNewCpl;
5349
5350 /* Create the stack frame. */
5351 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
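    /* I.e. 5 quadwords (RIP, CS, RFLAGS, RSP, SS), or 6 when an error code is
       pushed; long mode always pushes SS:RSP, even without a privilege change. */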
5352 RTPTRUNION uStackFrame;
5353 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5354 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5355 if (rcStrict != VINF_SUCCESS)
5356 return rcStrict;
5357 void * const pvStackFrame = uStackFrame.pv;
5358
5359 if (fFlags & IEM_XCPT_FLAGS_ERR)
5360 *uStackFrame.pu64++ = uErr;
5361 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5362 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5363 uStackFrame.pu64[2] = fEfl;
5364 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5365 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5366 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5367 if (rcStrict != VINF_SUCCESS)
5368 return rcStrict;
5369
5370 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5371 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5372 * after pushing the stack frame? (Write protect the gdt + stack to
5373 * find out.) */
5374 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5375 {
5376 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5377 if (rcStrict != VINF_SUCCESS)
5378 return rcStrict;
5379 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5380 }
5381
5382 /*
5383 * Start committing the register changes.
5384 */
5385 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5386 * hidden registers when interrupting 32-bit or 16-bit code! */
5387 if (uNewCpl != uOldCpl)
5388 {
5389 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5390 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5391 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5392 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5393 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5394 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5395 }
5396 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5397 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5398 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5399 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5400 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5401 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5402 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5403 pVCpu->cpum.GstCtx.rip = uNewRip;
5404
5405 fEfl &= ~fEflToClear;
5406 IEMMISC_SET_EFL(pVCpu, fEfl);
5407
5408 if (fFlags & IEM_XCPT_FLAGS_CR2)
5409 pVCpu->cpum.GstCtx.cr2 = uCr2;
5410
5411 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5412 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5413
5414 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5415}
5416
5417
5418/**
5419 * Implements exceptions and interrupts.
5420 *
5421 * All exceptions and interrupts go through this function!
5422 *
5423 * @returns VBox strict status code.
5424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5425 * @param cbInstr The number of bytes to offset rIP by in the return
5426 * address.
5427 * @param u8Vector The interrupt / exception vector number.
5428 * @param fFlags The flags.
5429 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5430 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5431 */
5432DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5433iemRaiseXcptOrInt(PVMCPU pVCpu,
5434 uint8_t cbInstr,
5435 uint8_t u8Vector,
5436 uint32_t fFlags,
5437 uint16_t uErr,
5438 uint64_t uCr2)
5439{
5440 /*
5441 * Get all the state that we might need here.
5442 */
5443 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5444 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5445
5446#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5447 /*
5448 * Flush prefetch buffer
5449 */
5450 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5451#endif
5452
5453 /*
5454 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5455 */
5456 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5457 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5458 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5459 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5460 {
5461 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5462 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5463 u8Vector = X86_XCPT_GP;
5464 uErr = 0;
5465 }
5466#ifdef DBGFTRACE_ENABLED
5467 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5468 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5469 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5470#endif
5471
5472#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5473 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5474 {
5475 /*
5476 * If the event is being injected as part of VMRUN, it isn't subject to event
5477 * intercepts in the nested-guest. However, secondary exceptions that occur
5478 * during injection of any event -are- subject to exception intercepts.
5479 * See AMD spec. 15.20 "Event Injection".
5480 */
5481 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5482 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = 1;
5483 else
5484 {
5485 /*
5486 * Check and handle if the event being raised is intercepted.
5487 */
5488 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5489 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5490 return rcStrict0;
5491 }
5492 }
5493#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5494
5495 /*
5496 * Do recursion accounting.
5497 */
5498 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5499 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5500 if (pVCpu->iem.s.cXcptRecursions == 0)
5501 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5502 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5503 else
5504 {
5505 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5506 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5507 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5508
5509 if (pVCpu->iem.s.cXcptRecursions >= 4)
5510 {
5511#ifdef DEBUG_bird
5512 AssertFailed();
5513#endif
5514 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5515 }
5516
5517 /*
5518 * Evaluate the sequence of recurring events.
5519 */
5520 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5521 NULL /* pXcptRaiseInfo */);
5522 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5523 { /* likely */ }
5524 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5525 {
5526 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5527 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5528 u8Vector = X86_XCPT_DF;
5529 uErr = 0;
5530 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5531 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5532 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5533 }
5534 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5535 {
5536 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5537 return iemInitiateCpuShutdown(pVCpu);
5538 }
5539 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5540 {
5541 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5542 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5543 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5544 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5545 return VERR_EM_GUEST_CPU_HANG;
5546 }
5547 else
5548 {
5549 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5550 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5551 return VERR_IEM_IPE_9;
5552 }
5553
5554 /*
5555 * The 'EXT' bit is set when an exception occurs during delivery of an external
5556 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5557 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by
5558 * software interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5559 *
5560 * [1] - Intel spec. 6.13 "Error Code"
5561 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5562 * [3] - Intel Instruction reference for INT n.
5563 */
5564 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5565 && (fFlags & IEM_XCPT_FLAGS_ERR)
5566 && u8Vector != X86_XCPT_PF
5567 && u8Vector != X86_XCPT_DF)
5568 {
5569 uErr |= X86_TRAP_ERR_EXTERNAL;
5570 }
5571 }
5572
5573 pVCpu->iem.s.cXcptRecursions++;
5574 pVCpu->iem.s.uCurXcpt = u8Vector;
5575 pVCpu->iem.s.fCurXcpt = fFlags;
5576 pVCpu->iem.s.uCurXcptErr = uErr;
5577 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5578
5579 /*
5580 * Extensive logging.
5581 */
5582#if defined(LOG_ENABLED) && defined(IN_RING3)
5583 if (LogIs3Enabled())
5584 {
5585 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5586 PVM pVM = pVCpu->CTX_SUFF(pVM);
5587 char szRegs[4096];
5588 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5589 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5590 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5591 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5592 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5593 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5594 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5595 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5596 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5597 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5598 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5599 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5600 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5601 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5602 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5603 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5604 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5605 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5606 " efer=%016VR{efer}\n"
5607 " pat=%016VR{pat}\n"
5608 " sf_mask=%016VR{sf_mask}\n"
5609 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5610 " lstar=%016VR{lstar}\n"
5611 " star=%016VR{star} cstar=%016VR{cstar}\n"
5612 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5613 );
5614
5615 char szInstr[256];
5616 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5617 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5618 szInstr, sizeof(szInstr), NULL);
5619 Log3(("%s%s\n", szRegs, szInstr));
5620 }
5621#endif /* LOG_ENABLED */
5622
5623 /*
5624 * Call the mode specific worker function.
5625 */
5626 VBOXSTRICTRC rcStrict;
5627 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5628 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5629 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5630 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5631 else
5632 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
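    /* Note: V8086 mode is dispatched to the protected-mode worker, since EFLAGS.VM
       is only meaningful with CR0.PE set; the long-mode worker covers both 64-bit
       and compatibility mode code as EFER.LMA is set in either case. */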
5633
5634 /* Flush the prefetch buffer. */
5635#ifdef IEM_WITH_CODE_TLB
5636 pVCpu->iem.s.pbInstrBuf = NULL;
5637#else
5638 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5639#endif
5640
5641 /*
5642 * Unwind.
5643 */
5644 pVCpu->iem.s.cXcptRecursions--;
5645 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5646 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5647 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5648 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5649 pVCpu->iem.s.cXcptRecursions + 1));
5650 return rcStrict;
5651}
5652
5653#ifdef IEM_WITH_SETJMP
5654/**
5655 * See iemRaiseXcptOrInt. Will not return.
5656 */
5657IEM_STATIC DECL_NO_RETURN(void)
5658iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5659 uint8_t cbInstr,
5660 uint8_t u8Vector,
5661 uint32_t fFlags,
5662 uint16_t uErr,
5663 uint64_t uCr2)
5664{
5665 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5666 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5667}
5668#endif
5669
5670
5671/** \#DE - 00. */
5672DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5673{
5674 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5675}
5676
5677
5678/** \#DB - 01.
5679 * @note This automatically clears DR7.GD. */
5680DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5681{
5682 /** @todo set/clear RF. */
5683 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5684 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5685}
5686
5687
5688/** \#BR - 05. */
5689DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5690{
5691 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5692}
5693
5694
5695/** \#UD - 06. */
5696DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5697{
5698 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5699}
5700
5701
5702/** \#NM - 07. */
5703DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5704{
5705 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5706}
5707
5708
5709/** \#TS(err) - 0a. */
5710DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5711{
5712 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5713}
5714
5715
5716/** \#TS(tr) - 0a. */
5717DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5718{
5719 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5720 pVCpu->cpum.GstCtx.tr.Sel, 0);
5721}
5722
5723
5724/** \#TS(0) - 0a. */
5725DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5726{
5727 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5728 0, 0);
5729}
5730
5731
5732/** \#TS(err) - 0a. */
5733DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5734{
5735 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5736 uSel & X86_SEL_MASK_OFF_RPL, 0);
5737}
5738
5739
5740/** \#NP(err) - 0b. */
5741DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5742{
5743 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5744}
5745
5746
5747/** \#NP(sel) - 0b. */
5748DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5749{
5750 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5751 uSel & ~X86_SEL_RPL, 0);
5752}
5753
5754
5755/** \#SS(seg) - 0c. */
5756DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5757{
5758 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5759 uSel & ~X86_SEL_RPL, 0);
5760}
5761
5762
5763/** \#SS(err) - 0c. */
5764DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5765{
5766 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5767}
5768
5769
5770/** \#GP(n) - 0d. */
5771DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5772{
5773 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5774}
5775
5776
5777/** \#GP(0) - 0d. */
5778DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5779{
5780 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5781}
5782
5783#ifdef IEM_WITH_SETJMP
5784/** \#GP(0) - 0d. */
5785DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5786{
5787 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5788}
5789#endif
5790
5791
5792/** \#GP(sel) - 0d. */
5793DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5794{
5795 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5796 Sel & ~X86_SEL_RPL, 0);
5797}
5798
5799
5800/** \#GP(0) - 0d. */
5801DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5802{
5803 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5804}
5805
5806
5807/** \#GP(sel) - 0d. */
5808DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5809{
5810 NOREF(iSegReg); NOREF(fAccess);
5811 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5812 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5813}
5814
5815#ifdef IEM_WITH_SETJMP
5816/** \#GP(sel) - 0d, longjmp. */
5817DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5818{
5819 NOREF(iSegReg); NOREF(fAccess);
5820 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5821 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5822}
5823#endif
5824
5825/** \#GP(sel) - 0d. */
5826DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5827{
5828 NOREF(Sel);
5829 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5830}
5831
5832#ifdef IEM_WITH_SETJMP
5833/** \#GP(sel) - 0d, longjmp. */
5834DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5835{
5836 NOREF(Sel);
5837 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5838}
5839#endif
5840
5841
5842/** \#GP(sel) - 0d. */
5843DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5844{
5845 NOREF(iSegReg); NOREF(fAccess);
5846 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5847}
5848
5849#ifdef IEM_WITH_SETJMP
5850/** \#GP(sel) - 0d, longjmp. */
5851DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5852 uint32_t fAccess)
5853{
5854 NOREF(iSegReg); NOREF(fAccess);
5855 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5856}
5857#endif
5858
5859
5860/** \#PF(n) - 0e. */
5861DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5862{
5863 uint16_t uErr;
5864 switch (rc)
5865 {
5866 case VERR_PAGE_NOT_PRESENT:
5867 case VERR_PAGE_TABLE_NOT_PRESENT:
5868 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5869 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5870 uErr = 0;
5871 break;
5872
5873 default:
5874 AssertMsgFailed(("%Rrc\n", rc));
5875 RT_FALL_THRU();
5876 case VERR_ACCESS_DENIED:
5877 uErr = X86_TRAP_PF_P;
5878 break;
5879
5880 /** @todo reserved */
5881 }
5882
5883 if (pVCpu->iem.s.uCpl == 3)
5884 uErr |= X86_TRAP_PF_US;
5885
5886 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5887 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5888 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5889 uErr |= X86_TRAP_PF_ID;
5890
5891#if 0 /* This is so much non-sense, really. Why was it done like that? */
5892 /* Note! RW access callers reporting a WRITE protection fault, will clear
5893 the READ flag before calling. So, read-modify-write accesses (RW)
5894 can safely be reported as READ faults. */
5895 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5896 uErr |= X86_TRAP_PF_RW;
5897#else
5898 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5899 {
5900 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5901 uErr |= X86_TRAP_PF_RW;
5902 }
5903#endif
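    /* The resulting value uses the architectural #PF error code bits: P (bit 0,
       protection violation vs. not-present), W/R (bit 1, write access), U/S
       (bit 2, user-mode access) and I/D (bit 4, instruction fetch). */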
5904
5905 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5906 uErr, GCPtrWhere);
5907}
5908
5909#ifdef IEM_WITH_SETJMP
5910/** \#PF(n) - 0e, longjmp. */
5911IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5912{
5913 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5914}
5915#endif
5916
5917
5918/** \#MF(0) - 10. */
5919DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5920{
5921 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5922}
5923
5924
5925/** \#AC(0) - 11. */
5926DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5927{
5928 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5929}
5930
5931
5932/**
5933 * Macro for calling iemCImplRaiseDivideError().
5934 *
5935 * This enables us to add/remove arguments and force different levels of
5936 * inlining as we wish.
5937 *
5938 * @return Strict VBox status code.
5939 */
5940#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5941IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5942{
5943 NOREF(cbInstr);
5944 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5945}
5946
5947
5948/**
5949 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5950 *
5951 * This enables us to add/remove arguments and force different levels of
5952 * inlining as we wish.
5953 *
5954 * @return Strict VBox status code.
5955 */
5956#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5957IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5958{
5959 NOREF(cbInstr);
5960 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5961}
5962
5963
5964/**
5965 * Macro for calling iemCImplRaiseInvalidOpcode().
5966 *
5967 * This enables us to add/remove arguments and force different levels of
5968 * inlining as we wish.
5969 *
5970 * @return Strict VBox status code.
5971 */
5972#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5973IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5974{
5975 NOREF(cbInstr);
5976 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5977}
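/* Illustrative use (hypothetical decoder name) of the IEMOP_RAISE_* macros from
 * an opcode decoder body, mirroring what FNIEMOP_UD_STUB below expands to:
 *     FNIEMOP_DEF(iemOp_SomeInvalidEncoding)
 *     {
 *         return IEMOP_RAISE_INVALID_OPCODE();
 *     }
 * The macros defer to the iemCImplRaise* workers above via IEM_MC_DEFER_TO_CIMPL_0. */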
5978
5979
5980/** @} */
5981
5982
5983/*
5984 *
5985 * Helper routines.
5986 * Helper routines.
5987 * Helper routines.
5988 *
5989 */
5990
5991/**
5992 * Recalculates the effective operand size.
5993 *
5994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5995 */
5996IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5997{
5998 switch (pVCpu->iem.s.enmCpuMode)
5999 {
6000 case IEMMODE_16BIT:
6001 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6002 break;
6003 case IEMMODE_32BIT:
6004 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6005 break;
6006 case IEMMODE_64BIT:
6007 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6008 {
6009 case 0:
6010 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6011 break;
6012 case IEM_OP_PRF_SIZE_OP:
6013 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6014 break;
6015 case IEM_OP_PRF_SIZE_REX_W:
6016 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6017 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6018 break;
6019 }
6020 break;
6021 default:
6022 AssertFailed();
6023 }
6024}
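/* Examples of the above mapping: in 64-bit mode a 0x66 operand-size prefix alone
 * selects a 16-bit effective operand size, REX.W selects 64-bit regardless of any
 * 0x66 prefix, and with neither prefix the default operand size is used. */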
6025
6026
6027/**
6028 * Sets the default operand size to 64-bit and recalculates the effective
6029 * operand size.
6030 *
6031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6032 */
6033IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6034{
6035 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6036 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6037 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6038 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6039 else
6040 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6041}
6042
6043
6044/*
6045 *
6046 * Common opcode decoders.
6047 * Common opcode decoders.
6048 * Common opcode decoders.
6049 *
6050 */
6051//#include <iprt/mem.h>
6052
6053/**
6054 * Used to add extra details about a stub case.
6055 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6056 */
6057IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6058{
6059#if defined(LOG_ENABLED) && defined(IN_RING3)
6060 PVM pVM = pVCpu->CTX_SUFF(pVM);
6061 char szRegs[4096];
6062 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6063 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6064 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6065 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6066 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6067 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6068 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6069 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6070 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6071 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6072 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6073 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6074 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6075 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6076 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6077 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6078 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6079 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6080 " efer=%016VR{efer}\n"
6081 " pat=%016VR{pat}\n"
6082 " sf_mask=%016VR{sf_mask}\n"
6083 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6084 " lstar=%016VR{lstar}\n"
6085 " star=%016VR{star} cstar=%016VR{cstar}\n"
6086 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6087 );
6088
6089 char szInstr[256];
6090 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6091 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6092 szInstr, sizeof(szInstr), NULL);
6093
6094 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6095#else
6096    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6097#endif
6098}
6099
6100/**
6101 * Complains about a stub.
6102 *
6103 * Providing two versions of this macro, one for daily use and one for use when
6104 * working on IEM.
6105 */
6106#if 0
6107# define IEMOP_BITCH_ABOUT_STUB() \
6108 do { \
6109 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6110 iemOpStubMsg2(pVCpu); \
6111 RTAssertPanic(); \
6112 } while (0)
6113#else
6114# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6115#endif
6116
6117/** Stubs an opcode. */
6118#define FNIEMOP_STUB(a_Name) \
6119 FNIEMOP_DEF(a_Name) \
6120 { \
6121 RT_NOREF_PV(pVCpu); \
6122 IEMOP_BITCH_ABOUT_STUB(); \
6123 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6124 } \
6125 typedef int ignore_semicolon
6126
6127/** Stubs an opcode. */
6128#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6129 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6130 { \
6131 RT_NOREF_PV(pVCpu); \
6132 RT_NOREF_PV(a_Name0); \
6133 IEMOP_BITCH_ABOUT_STUB(); \
6134 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6135 } \
6136 typedef int ignore_semicolon
6137
6138/** Stubs an opcode which currently should raise \#UD. */
6139#define FNIEMOP_UD_STUB(a_Name) \
6140 FNIEMOP_DEF(a_Name) \
6141 { \
6142 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6143 return IEMOP_RAISE_INVALID_OPCODE(); \
6144 } \
6145 typedef int ignore_semicolon
6146
6147/** Stubs an opcode which currently should raise \#UD. */
6148#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6149 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6150 { \
6151 RT_NOREF_PV(pVCpu); \
6152 RT_NOREF_PV(a_Name0); \
6153 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6154 return IEMOP_RAISE_INVALID_OPCODE(); \
6155 } \
6156 typedef int ignore_semicolon
6157
6158
6159
6160/** @name Register Access.
6161 * @{
6162 */
6163
6164/**
6165 * Gets a reference (pointer) to the specified hidden segment register.
6166 *
6167 * @returns Hidden register reference.
6168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6169 * @param iSegReg The segment register.
6170 */
6171IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6172{
6173 Assert(iSegReg < X86_SREG_COUNT);
6174 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6175 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6176
6177#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6178 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6179 { /* likely */ }
6180 else
6181 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6182#else
6183 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6184#endif
6185 return pSReg;
6186}
6187
6188
6189/**
6190 * Ensures that the given hidden segment register is up to date.
6191 *
6192 * @returns Hidden register reference.
6193 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6194 * @param pSReg The segment register.
6195 */
6196IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6197{
6198#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6199 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6200 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6201#else
6202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6203 NOREF(pVCpu);
6204#endif
6205 return pSReg;
6206}
6207
6208
6209/**
6210 * Gets a reference (pointer) to the specified segment register (the selector
6211 * value).
6212 *
6213 * @returns Pointer to the selector variable.
6214 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6215 * @param iSegReg The segment register.
6216 */
6217DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6218{
6219 Assert(iSegReg < X86_SREG_COUNT);
6220 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6221 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6222}
6223
6224
6225/**
6226 * Fetches the selector value of a segment register.
6227 *
6228 * @returns The selector value.
6229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6230 * @param iSegReg The segment register.
6231 */
6232DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6233{
6234 Assert(iSegReg < X86_SREG_COUNT);
6235 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6236 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6237}
6238
6239
6240/**
6241 * Fetches the base address value of a segment register.
6242 *
6243 * @returns The segment base address.
6244 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6245 * @param iSegReg The segment register.
6246 */
6247DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6248{
6249 Assert(iSegReg < X86_SREG_COUNT);
6250 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6251 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6252}
6253
6254
6255/**
6256 * Gets a reference (pointer) to the specified general purpose register.
6257 *
6258 * @returns Register reference.
6259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6260 * @param iReg The general purpose register.
6261 */
6262DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6263{
6264 Assert(iReg < 16);
6265 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6266}
6267
6268
6269/**
6270 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6271 *
6272 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6273 *
6274 * @returns Register reference.
6275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6276 * @param iReg The register.
6277 */
6278DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6279{
6280 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6281 {
6282 Assert(iReg < 16);
6283 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6284 }
6285 /* high 8-bit register. */
6286 Assert(iReg < 8);
6287 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6288}
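
/*
 * Worked illustration (a sketch only, assuming the usual RAX,RCX,RDX,RBX
 * ordering of aGRegs): without a REX prefix the encodings 4 thru 7 select the
 * high byte registers, and the (iReg & 3) / bHi addressing above maps them
 * like this:
 *
 *      iReg 4 (AH) -> aGRegs[0].bHi        iReg 6 (DH) -> aGRegs[2].bHi
 *      iReg 5 (CH) -> aGRegs[1].bHi        iReg 7 (BH) -> aGRegs[3].bHi
 */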
6289
6290
6291/**
6292 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6293 *
6294 * @returns Register reference.
6295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6296 * @param iReg The register.
6297 */
6298DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6299{
6300 Assert(iReg < 16);
6301 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6302}
6303
6304
6305/**
6306 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6307 *
6308 * @returns Register reference.
6309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6310 * @param iReg The register.
6311 */
6312DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6313{
6314 Assert(iReg < 16);
6315 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6316}
6317
6318
6319/**
6320 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6321 *
6322 * @returns Register reference.
6323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6324 * @param iReg The register.
6325 */
6326DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6327{
6328    Assert(iReg < 16);
6329 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6330}
6331
6332
6333/**
6334 * Gets a reference (pointer) to the specified segment register's base address.
6335 *
6336 * @returns Segment register base address reference.
6337 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6338 * @param iSegReg The segment selector.
6339 */
6340DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6341{
6342 Assert(iSegReg < X86_SREG_COUNT);
6343 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6344 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6345}
6346
6347
6348/**
6349 * Fetches the value of an 8-bit general purpose register.
6350 *
6351 * @returns The register value.
6352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6353 * @param iReg The register.
6354 */
6355DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6356{
6357 return *iemGRegRefU8(pVCpu, iReg);
6358}
6359
6360
6361/**
6362 * Fetches the value of a 16-bit general purpose register.
6363 *
6364 * @returns The register value.
6365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6366 * @param iReg The register.
6367 */
6368DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6369{
6370 Assert(iReg < 16);
6371 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6372}
6373
6374
6375/**
6376 * Fetches the value of a 32-bit general purpose register.
6377 *
6378 * @returns The register value.
6379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6380 * @param iReg The register.
6381 */
6382DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6383{
6384 Assert(iReg < 16);
6385 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6386}
6387
6388
6389/**
6390 * Fetches the value of a 64-bit general purpose register.
6391 *
6392 * @returns The register value.
6393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6394 * @param iReg The register.
6395 */
6396DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6397{
6398 Assert(iReg < 16);
6399 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6400}
6401
6402
6403/**
6404 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6405 *
6406 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6407 * segment limit.
6408 *
6408 * @returns Strict VBox status code.
6409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6410 * @param offNextInstr The offset of the next instruction.
6411 */
6412IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6413{
6414 switch (pVCpu->iem.s.enmEffOpSize)
6415 {
6416 case IEMMODE_16BIT:
6417 {
6418 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6419 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6420 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6421 return iemRaiseGeneralProtectionFault0(pVCpu);
6422 pVCpu->cpum.GstCtx.rip = uNewIp;
6423 break;
6424 }
6425
6426 case IEMMODE_32BIT:
6427 {
6428 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6429 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6430
6431 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6432 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6433 return iemRaiseGeneralProtectionFault0(pVCpu);
6434 pVCpu->cpum.GstCtx.rip = uNewEip;
6435 break;
6436 }
6437
6438 case IEMMODE_64BIT:
6439 {
6440 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6441
6442 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6443 if (!IEM_IS_CANONICAL(uNewRip))
6444 return iemRaiseGeneralProtectionFault0(pVCpu);
6445 pVCpu->cpum.GstCtx.rip = uNewRip;
6446 break;
6447 }
6448
6449 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6450 }
6451
6452 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6453
6454#ifndef IEM_WITH_CODE_TLB
6455 /* Flush the prefetch buffer. */
6456 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6457#endif
6458
6459 return VINF_SUCCESS;
6460}
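
#if 0 /* Sketch of the 16-bit wrap-around above; plain C for illustration only, not built. */
static void iemExampleRelJmpS8Wrap16(void)
{
    /* With IP=0xfffe, a 2 byte JMP rel8 with offset +4 wraps to 0x0004 because
       the sum is computed in a uint16_t before the limit check. */
    uint16_t ip      = UINT16_C(0xfffe);
    int8_t   offNext = 4;
    uint8_t  cb      = 2;
    uint16_t uNewIp  = (uint16_t)(ip + offNext + cb);   /* = 0x0004 */
    NOREF(uNewIp);
}
#endif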
6461
6462
6463/**
6464 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6465 *
6466 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6467 * segment limit.
6468 *
6469 * @returns Strict VBox status code.
6470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6471 * @param offNextInstr The offset of the next instruction.
6472 */
6473IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6474{
6475 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6476
6477 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6478 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6479 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6480 return iemRaiseGeneralProtectionFault0(pVCpu);
6481 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6482 pVCpu->cpum.GstCtx.rip = uNewIp;
6483 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6484
6485#ifndef IEM_WITH_CODE_TLB
6486 /* Flush the prefetch buffer. */
6487 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6488#endif
6489
6490 return VINF_SUCCESS;
6491}
6492
6493
6494/**
6495 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6496 *
6497 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6498 * segment limit.
6499 *
6500 * @returns Strict VBox status code.
6501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6502 * @param offNextInstr The offset of the next instruction.
6503 */
6504IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6505{
6506 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6507
6508 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6509 {
6510 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6511
6512 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6513 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6514 return iemRaiseGeneralProtectionFault0(pVCpu);
6515 pVCpu->cpum.GstCtx.rip = uNewEip;
6516 }
6517 else
6518 {
6519 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6520
6521 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6522 if (!IEM_IS_CANONICAL(uNewRip))
6523 return iemRaiseGeneralProtectionFault0(pVCpu);
6524 pVCpu->cpum.GstCtx.rip = uNewRip;
6525 }
6526 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6527
6528#ifndef IEM_WITH_CODE_TLB
6529 /* Flush the prefetch buffer. */
6530 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6531#endif
6532
6533 return VINF_SUCCESS;
6534}
6535
6536
6537/**
6538 * Performs a near jump to the specified address.
6539 *
6540 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6541 * segment limit.
6542 *
6542 * @returns Strict VBox status code.
6543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6544 * @param uNewRip The new RIP value.
6545 */
6546IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6547{
6548 switch (pVCpu->iem.s.enmEffOpSize)
6549 {
6550 case IEMMODE_16BIT:
6551 {
6552 Assert(uNewRip <= UINT16_MAX);
6553 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6554 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6555 return iemRaiseGeneralProtectionFault0(pVCpu);
6556 /** @todo Test 16-bit jump in 64-bit mode. */
6557 pVCpu->cpum.GstCtx.rip = uNewRip;
6558 break;
6559 }
6560
6561 case IEMMODE_32BIT:
6562 {
6563 Assert(uNewRip <= UINT32_MAX);
6564 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6565 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6566
6567 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6568 return iemRaiseGeneralProtectionFault0(pVCpu);
6569 pVCpu->cpum.GstCtx.rip = uNewRip;
6570 break;
6571 }
6572
6573 case IEMMODE_64BIT:
6574 {
6575 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6576
6577 if (!IEM_IS_CANONICAL(uNewRip))
6578 return iemRaiseGeneralProtectionFault0(pVCpu);
6579 pVCpu->cpum.GstCtx.rip = uNewRip;
6580 break;
6581 }
6582
6583 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6584 }
6585
6586 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6587
6588#ifndef IEM_WITH_CODE_TLB
6589 /* Flush the prefetch buffer. */
6590 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6591#endif
6592
6593 return VINF_SUCCESS;
6594}
6595
6596
6597/**
6598 * Gets the address of the top of the stack.
6599 *
6599 * @returns The current top of stack address (SP/ESP/RSP depending on mode).
6600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6601 */
6602DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6603{
6604 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6605 return pVCpu->cpum.GstCtx.rsp;
6606 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6607 return pVCpu->cpum.GstCtx.esp;
6608 return pVCpu->cpum.GstCtx.sp;
6609}
6610
6611
6612/**
6613 * Updates the RIP/EIP/IP to point to the next instruction.
6614 *
6615 * This function leaves the EFLAGS.RF flag alone.
6616 *
6617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6618 * @param cbInstr The number of bytes to add.
6619 */
6620IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6621{
6622 switch (pVCpu->iem.s.enmCpuMode)
6623 {
6624 case IEMMODE_16BIT:
6625 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6626 pVCpu->cpum.GstCtx.eip += cbInstr;
6627 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6628 break;
6629
6630 case IEMMODE_32BIT:
6631 pVCpu->cpum.GstCtx.eip += cbInstr;
6632 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6633 break;
6634
6635 case IEMMODE_64BIT:
6636 pVCpu->cpum.GstCtx.rip += cbInstr;
6637 break;
6638 default: AssertFailed();
6639 }
6640}
6641
6642
6643#if 0
6644/**
6645 * Updates the RIP/EIP/IP to point to the next instruction.
6646 *
6647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6648 */
6649IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6650{
6651 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6652}
6653#endif
6654
6655
6656
6657/**
6658 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6659 *
6660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6661 * @param cbInstr The number of bytes to add.
6662 */
6663IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6664{
6665 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6666
6667 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6668#if ARCH_BITS >= 64
6669 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6670 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6671 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6672#else
6673 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6674 pVCpu->cpum.GstCtx.rip += cbInstr;
6675 else
6676 pVCpu->cpum.GstCtx.eip += cbInstr;
6677#endif
6678}
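
#if 0 /* Sketch of the mask table trick above; plain C for illustration only, not built. */
static void iemExampleRipMaskTable(void)
{
    /* Index 0/1 (16/32-bit modes) keep only the low 32 bits (EIP), index 2
       (64-bit mode) keeps everything.  E.g. advancing by a 5 byte instruction
       at EIP=0xfffffffe wraps to 3 instead of carrying into the upper half. */
    static uint64_t const s_aMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
    uint64_t uRip = UINT64_C(0xfffffffe);
    uRip = (uRip + 5) & s_aMasks[1 /* IEMMODE_32BIT */];    /* = 3 */
    NOREF(uRip);
}
#endif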
6679
6680
6681/**
6682 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6683 *
6684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6685 */
6686IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6687{
6688 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6689}
6690
6691
6692/**
6693 * Adds to the stack pointer.
6694 *
6695 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6696 * @param cbToAdd The number of bytes to add (8-bit!).
6697 */
6698DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6699{
6700 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6701 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6702 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6703 pVCpu->cpum.GstCtx.esp += cbToAdd;
6704 else
6705 pVCpu->cpum.GstCtx.sp += cbToAdd;
6706}
6707
6708
6709/**
6710 * Subtracts from the stack pointer.
6711 *
6712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6713 * @param cbToSub The number of bytes to subtract (8-bit!).
6714 */
6715DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6716{
6717 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6718 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6719 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6720 pVCpu->cpum.GstCtx.esp -= cbToSub;
6721 else
6722 pVCpu->cpum.GstCtx.sp -= cbToSub;
6723}
6724
6725
6726/**
6727 * Adds to the temporary stack pointer.
6728 *
6729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6730 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6731 * @param cbToAdd The number of bytes to add (16-bit).
6732 */
6733DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6734{
6735 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6736 pTmpRsp->u += cbToAdd;
6737 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6738 pTmpRsp->DWords.dw0 += cbToAdd;
6739 else
6740 pTmpRsp->Words.w0 += cbToAdd;
6741}
6742
6743
6744/**
6745 * Subtracts from the temporary stack pointer.
6746 *
6747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6748 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6749 * @param cbToSub The number of bytes to subtract.
6750 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6751 * expecting that.
6752 */
6753DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6754{
6755 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6756 pTmpRsp->u -= cbToSub;
6757 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6758 pTmpRsp->DWords.dw0 -= cbToSub;
6759 else
6760 pTmpRsp->Words.w0 -= cbToSub;
6761}
6762
6763
6764/**
6765 * Calculates the effective stack address for a push of the specified size as
6766 * well as the new RSP value (upper bits may be masked).
6767 *
6768 * @returns Effective stack address for the push.
6769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6770 * @param   cbItem              The size of the stack item to push.
6771 * @param puNewRsp Where to return the new RSP value.
6772 */
6773DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6774{
6775 RTUINT64U uTmpRsp;
6776 RTGCPTR GCPtrTop;
6777 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6778
6779 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6780 GCPtrTop = uTmpRsp.u -= cbItem;
6781 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6782 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6783 else
6784 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6785 *puNewRsp = uTmpRsp.u;
6786 return GCPtrTop;
6787}
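
#if 0 /* Sketch of the push address calculation above; plain C for illustration only, not built. */
static void iemExampleRspForPush16(void)
{
    /* With a 16-bit stack (not 64-bit mode, SS.B=0) only SP is decremented and
       it wraps at 64K; the upper RSP bits stay untouched. */
    RTUINT64U uTmpRsp;
    uTmpRsp.u = UINT64_C(0x0000000012340002);
    uint16_t GCPtrTop = uTmpRsp.Words.w0 -= 8;  /* 0x0002 - 8 wraps to 0xfffa */
    /* uTmpRsp.u is now 0x000000001234fffa and the item would be written at SS:0xfffa. */
    NOREF(GCPtrTop);
}
#endif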
6788
6789
6790/**
6791 * Gets the current stack pointer and calculates the value after a pop of the
6792 * specified size.
6793 *
6794 * @returns Current stack pointer.
6795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6796 * @param cbItem The size of the stack item to pop.
6797 * @param puNewRsp Where to return the new RSP value.
6798 */
6799DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6800{
6801 RTUINT64U uTmpRsp;
6802 RTGCPTR GCPtrTop;
6803 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6804
6805 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6806 {
6807 GCPtrTop = uTmpRsp.u;
6808 uTmpRsp.u += cbItem;
6809 }
6810 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6811 {
6812 GCPtrTop = uTmpRsp.DWords.dw0;
6813 uTmpRsp.DWords.dw0 += cbItem;
6814 }
6815 else
6816 {
6817 GCPtrTop = uTmpRsp.Words.w0;
6818 uTmpRsp.Words.w0 += cbItem;
6819 }
6820 *puNewRsp = uTmpRsp.u;
6821 return GCPtrTop;
6822}
6823
6824
6825/**
6826 * Calculates the effective stack address for a push of the specified size as
6827 * well as the new temporary RSP value (upper bits may be masked).
6828 *
6829 * @returns Effective stack address for the push.
6830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6831 * @param pTmpRsp The temporary stack pointer. This is updated.
6832 * @param   cbItem              The size of the stack item to push.
6833 */
6834DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6835{
6836 RTGCPTR GCPtrTop;
6837
6838 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6839 GCPtrTop = pTmpRsp->u -= cbItem;
6840 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6841 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6842 else
6843 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6844 return GCPtrTop;
6845}
6846
6847
6848/**
6849 * Gets the effective stack address for a pop of the specified size and
6850 * calculates and updates the temporary RSP.
6851 *
6852 * @returns Current stack pointer.
6853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6854 * @param pTmpRsp The temporary stack pointer. This is updated.
6855 * @param cbItem The size of the stack item to pop.
6856 */
6857DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6858{
6859 RTGCPTR GCPtrTop;
6860 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6861 {
6862 GCPtrTop = pTmpRsp->u;
6863 pTmpRsp->u += cbItem;
6864 }
6865 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6866 {
6867 GCPtrTop = pTmpRsp->DWords.dw0;
6868 pTmpRsp->DWords.dw0 += cbItem;
6869 }
6870 else
6871 {
6872 GCPtrTop = pTmpRsp->Words.w0;
6873 pTmpRsp->Words.w0 += cbItem;
6874 }
6875 return GCPtrTop;
6876}
6877
6878/** @} */
6879
6880
6881/** @name FPU access and helpers.
6882 *
6883 * @{
6884 */
6885
6886
6887/**
6888 * Hook for preparing to use the host FPU.
6889 *
6890 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6891 *
6892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6893 */
6894DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6895{
6896#ifdef IN_RING3
6897 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6898#else
6899 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6900#endif
6901 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6902}
6903
6904
6905/**
6906 * Hook for preparing to use the host FPU for SSE.
6907 *
6908 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6909 *
6910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6911 */
6912DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6913{
6914 iemFpuPrepareUsage(pVCpu);
6915}
6916
6917
6918/**
6919 * Hook for preparing to use the host FPU for AVX.
6920 *
6921 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6922 *
6923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6924 */
6925DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6926{
6927 iemFpuPrepareUsage(pVCpu);
6928}
6929
6930
6931/**
6932 * Hook for actualizing the guest FPU state before the interpreter reads it.
6933 *
6934 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6935 *
6936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6937 */
6938DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6939{
6940#ifdef IN_RING3
6941 NOREF(pVCpu);
6942#else
6943 CPUMRZFpuStateActualizeForRead(pVCpu);
6944#endif
6945 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6946}
6947
6948
6949/**
6950 * Hook for actualizing the guest FPU state before the interpreter changes it.
6951 *
6952 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6953 *
6954 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6955 */
6956DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6957{
6958#ifdef IN_RING3
6959 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6960#else
6961 CPUMRZFpuStateActualizeForChange(pVCpu);
6962#endif
6963 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6964}
6965
6966
6967/**
6968 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6969 * only.
6970 *
6971 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6972 *
6973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6974 */
6975DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6976{
6977#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6978 NOREF(pVCpu);
6979#else
6980 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6981#endif
6982 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6983}
6984
6985
6986/**
6987 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6988 * read+write.
6989 *
6990 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6991 *
6992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6993 */
6994DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6995{
6996#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6997 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6998#else
6999 CPUMRZFpuStateActualizeForChange(pVCpu);
7000#endif
7001 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7002}
7003
7004
7005/**
7006 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7007 * only.
7008 *
7009 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7010 *
7011 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7012 */
7013DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7014{
7015#ifdef IN_RING3
7016 NOREF(pVCpu);
7017#else
7018 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7019#endif
7020 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7021}
7022
7023
7024/**
7025 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7026 * read+write.
7027 *
7028 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7029 *
7030 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7031 */
7032DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7033{
7034#ifdef IN_RING3
7035 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7036#else
7037 CPUMRZFpuStateActualizeForChange(pVCpu);
7038#endif
7039 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7040}
7041
7042
7043/**
7044 * Stores a QNaN value into a FPU register.
7045 *
7046 * @param pReg Pointer to the register.
7047 */
7048DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7049{
7050 pReg->au32[0] = UINT32_C(0x00000000);
7051 pReg->au32[1] = UINT32_C(0xc0000000);
7052 pReg->au16[4] = UINT16_C(0xffff);
7053}
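
/*
 * Note (illustration only): the three stores above assemble the x87 "real
 * indefinite" QNaN, i.e. sign=1, exponent=0x7fff, mantissa=0xc000000000000000,
 * which as an 80-bit image reads ffff:c0000000:00000000.
 */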
7054
7055
7056/**
7057 * Updates the FOP, FPU.CS and FPUIP registers.
7058 *
7059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7060 * @param pFpuCtx The FPU context.
7061 */
7062DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7063{
7064 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7065 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7066    /** @todo x87.CS and FPUIP need to be kept separately. */
7067 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7068 {
7069        /** @todo Testcase: the assumptions made here about how FPUIP and FPUDP are
7070         * handled in real mode are based on the fnsave and fnstenv images. */
7071 pFpuCtx->CS = 0;
7072 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7073 }
7074 else
7075 {
7076 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7077 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7078 }
7079}
7080
7081
7082/**
7083 * Updates the x87.DS and FPUDP registers.
7084 *
7085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7086 * @param pFpuCtx The FPU context.
7087 * @param iEffSeg The effective segment register.
7088 * @param GCPtrEff The effective address relative to @a iEffSeg.
7089 */
7090DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7091{
7092 RTSEL sel;
7093 switch (iEffSeg)
7094 {
7095 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7096 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7097 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7098 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7099 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7100 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7101 default:
7102 AssertMsgFailed(("%d\n", iEffSeg));
7103 sel = pVCpu->cpum.GstCtx.ds.Sel;
7104 }
7105    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7106 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7107 {
7108 pFpuCtx->DS = 0;
7109 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7110 }
7111 else
7112 {
7113 pFpuCtx->DS = sel;
7114 pFpuCtx->FPUDP = GCPtrEff;
7115 }
7116}
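
/*
 * Worked illustration (a sketch only) of the real/V86 branch above: FPUDP is
 * stored as a linear address there, so DS=0x1234 with an effective address of
 * 0x0010 gives FPUDP = 0x0010 + (0x1234 << 4) = 0x12350, and DS is left zero.
 */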
7117
7118
7119/**
7120 * Rotates the stack registers in the push direction.
7121 *
7122 * @param pFpuCtx The FPU context.
7123 * @remarks This is a complete waste of time, but fxsave stores the registers in
7124 * stack order.
7125 */
7126DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7127{
7128 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7129 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7130 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7131 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7132 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7133 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7134 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7135 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7136 pFpuCtx->aRegs[0].r80 = r80Tmp;
7137}
7138
7139
7140/**
7141 * Rotates the stack registers in the pop direction.
7142 *
7143 * @param pFpuCtx The FPU context.
7144 * @remarks This is a complete waste of time, but fxsave stores the registers in
7145 * stack order.
7146 */
7147DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7148{
7149 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7150 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7151 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7152 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7153 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7154 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7155 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7156 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7157 pFpuCtx->aRegs[7].r80 = r80Tmp;
7158}
7159
7160
7161/**
7162 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7163 * exception prevents it.
7164 *
7165 * @param pResult The FPU operation result to push.
7166 * @param pFpuCtx The FPU context.
7167 */
7168IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7169{
7170 /* Update FSW and bail if there are pending exceptions afterwards. */
7171 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7172 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7173 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7174 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7175 {
7176 pFpuCtx->FSW = fFsw;
7177 return;
7178 }
7179
7180 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7181 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7182 {
7183 /* All is fine, push the actual value. */
7184 pFpuCtx->FTW |= RT_BIT(iNewTop);
7185 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7186 }
7187 else if (pFpuCtx->FCW & X86_FCW_IM)
7188 {
7189 /* Masked stack overflow, push QNaN. */
7190 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7191 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7192 }
7193 else
7194 {
7195 /* Raise stack overflow, don't push anything. */
7196 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7197 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7198 return;
7199 }
7200
7201 fFsw &= ~X86_FSW_TOP_MASK;
7202 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7203 pFpuCtx->FSW = fFsw;
7204
7205 iemFpuRotateStackPush(pFpuCtx);
7206}
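
#if 0 /* Sketch of the TOP arithmetic used for pushes above; plain C for illustration only, not built. */
static void iemExampleFpuPushTop(void)
{
    /* TOP is a 3-bit field, so adding 7 modulo 8 is the same as subtracting 1,
       which is what a push does: with TOP=0 the pushed value lands in physical
       register 7 and TOP becomes 7. */
    uint16_t uTop    = 0;
    uint16_t iNewTop = (uTop + 7) & 7;  /* 7 being the unshifted 3-bit TOP mask */
    NOREF(iNewTop);                     /* = 7 */
}
#endif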
7207
7208
7209/**
7210 * Stores a result in a FPU register and updates the FSW and FTW.
7211 *
7212 * @param pFpuCtx The FPU context.
7213 * @param pResult The result to store.
7214 * @param iStReg Which FPU register to store it in.
7215 */
7216IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7217{
7218 Assert(iStReg < 8);
7219 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7220 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7221 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7222 pFpuCtx->FTW |= RT_BIT(iReg);
7223 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7224}
7225
7226
7227/**
7228 * Only updates the FPU status word (FSW) with the result of the current
7229 * instruction.
7230 *
7231 * @param pFpuCtx The FPU context.
7232 * @param u16FSW The FSW output of the current instruction.
7233 */
7234IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7235{
7236 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7237 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7238}
7239
7240
7241/**
7242 * Pops one item off the FPU stack if no pending exception prevents it.
7243 *
7244 * @param pFpuCtx The FPU context.
7245 */
7246IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7247{
7248 /* Check pending exceptions. */
7249 uint16_t uFSW = pFpuCtx->FSW;
7250 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7251 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7252 return;
7253
7254    /* TOP++ (popping increments TOP). */
7255 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7256 uFSW &= ~X86_FSW_TOP_MASK;
7257 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7258 pFpuCtx->FSW = uFSW;
7259
7260 /* Mark the previous ST0 as empty. */
7261 iOldTop >>= X86_FSW_TOP_SHIFT;
7262 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7263
7264 /* Rotate the registers. */
7265 iemFpuRotateStackPop(pFpuCtx);
7266}
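
#if 0 /* Sketch of the TOP increment above; plain C for illustration only, not built.
         Assumes the standard FSW layout with TOP in bits 11-13. */
static void iemExampleFpuPopTop(void)
{
    /* Adding 9 in the shifted field and masking is the same as TOP = (TOP + 1) & 7,
       i.e. a pop.  With TOP=7 it wraps back to 0. */
    uint16_t const fTopMask = UINT16_C(7) << 11;
    uint16_t uFsw    = UINT16_C(7) << 11;                           /* TOP = 7 */
    uint16_t iOldTop = uFsw & fTopMask;
    uFsw &= ~fTopMask;
    uFsw |= (uint16_t)(iOldTop + (UINT16_C(9) << 11)) & fTopMask;   /* TOP = 0 */
    NOREF(uFsw);
}
#endif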
7267
7268
7269/**
7270 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7271 *
7272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7273 * @param pResult The FPU operation result to push.
7274 */
7275IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7276{
7277 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7278 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7279 iemFpuMaybePushResult(pResult, pFpuCtx);
7280}
7281
7282
7283/**
7284 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7285 * and sets FPUDP and FPUDS.
7286 *
7287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7288 * @param pResult The FPU operation result to push.
7289 * @param iEffSeg The effective segment register.
7290 * @param GCPtrEff The effective address relative to @a iEffSeg.
7291 */
7292IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7293{
7294 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7295 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7296 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7297 iemFpuMaybePushResult(pResult, pFpuCtx);
7298}
7299
7300
7301/**
7302 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7303 * unless a pending exception prevents it.
7304 *
7305 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7306 * @param pResult The FPU operation result to store and push.
7307 */
7308IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7309{
7310 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7311 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7312
7313 /* Update FSW and bail if there are pending exceptions afterwards. */
7314 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7315 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7316 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7317 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7318 {
7319 pFpuCtx->FSW = fFsw;
7320 return;
7321 }
7322
7323 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7324 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7325 {
7326 /* All is fine, push the actual value. */
7327 pFpuCtx->FTW |= RT_BIT(iNewTop);
7328 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7329 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7330 }
7331 else if (pFpuCtx->FCW & X86_FCW_IM)
7332 {
7333 /* Masked stack overflow, push QNaN. */
7334 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7335 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7336 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7337 }
7338 else
7339 {
7340 /* Raise stack overflow, don't push anything. */
7341 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7342 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7343 return;
7344 }
7345
7346 fFsw &= ~X86_FSW_TOP_MASK;
7347 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7348 pFpuCtx->FSW = fFsw;
7349
7350 iemFpuRotateStackPush(pFpuCtx);
7351}
7352
7353
7354/**
7355 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7356 * FOP.
7357 *
7358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7359 * @param pResult The result to store.
7360 * @param iStReg Which FPU register to store it in.
7361 */
7362IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7363{
7364 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7365 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7366 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7367}
7368
7369
7370/**
7371 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7372 * FOP, and then pops the stack.
7373 *
7374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7375 * @param pResult The result to store.
7376 * @param iStReg Which FPU register to store it in.
7377 */
7378IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7379{
7380 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7381 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7382 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7383 iemFpuMaybePopOne(pFpuCtx);
7384}
7385
7386
7387/**
7388 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7389 * FPUDP, and FPUDS.
7390 *
7391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7392 * @param pResult The result to store.
7393 * @param iStReg Which FPU register to store it in.
7394 * @param iEffSeg The effective memory operand selector register.
7395 * @param GCPtrEff The effective memory operand offset.
7396 */
7397IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7398 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7399{
7400 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7401 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7402 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7403 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7404}
7405
7406
7407/**
7408 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7409 * FPUDP, and FPUDS, and then pops the stack.
7410 *
7411 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7412 * @param pResult The result to store.
7413 * @param iStReg Which FPU register to store it in.
7414 * @param iEffSeg The effective memory operand selector register.
7415 * @param GCPtrEff The effective memory operand offset.
7416 */
7417IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7418 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7419{
7420 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7421 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7422 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7423 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7424 iemFpuMaybePopOne(pFpuCtx);
7425}
7426
7427
7428/**
7429 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7430 *
7431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7432 */
7433IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7434{
7435 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7436 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7437}
7438
7439
7440/**
7441 * Marks the specified stack register as free (for FFREE).
7442 *
7443 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7444 * @param iStReg The register to free.
7445 */
7446IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7447{
7448 Assert(iStReg < 8);
7449 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7450 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7451 pFpuCtx->FTW &= ~RT_BIT(iReg);
7452}
7453
7454
7455/**
7456 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7457 *
7458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7459 */
7460IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7461{
7462 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7463 uint16_t uFsw = pFpuCtx->FSW;
7464 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7465 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7466 uFsw &= ~X86_FSW_TOP_MASK;
7467 uFsw |= uTop;
7468 pFpuCtx->FSW = uFsw;
7469}
7470
7471
7472/**
7473 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7474 *
7475 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7476 */
7477IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7478{
7479 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7480 uint16_t uFsw = pFpuCtx->FSW;
7481 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7482 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7483 uFsw &= ~X86_FSW_TOP_MASK;
7484 uFsw |= uTop;
7485 pFpuCtx->FSW = uFsw;
7486}
7487
7488
7489/**
7490 * Updates the FSW, FOP, FPUIP, and FPUCS.
7491 *
7492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7493 * @param u16FSW The FSW from the current instruction.
7494 */
7495IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7496{
7497 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7498 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7499 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7500}
7501
7502
7503/**
7504 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7505 *
7506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7507 * @param u16FSW The FSW from the current instruction.
7508 */
7509IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7510{
7511 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7512 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7513 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7514 iemFpuMaybePopOne(pFpuCtx);
7515}
7516
7517
7518/**
7519 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7520 *
7521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7522 * @param u16FSW The FSW from the current instruction.
7523 * @param iEffSeg The effective memory operand selector register.
7524 * @param GCPtrEff The effective memory operand offset.
7525 */
7526IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7527{
7528 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7529 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7530 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7531 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7532}
7533
7534
7535/**
7536 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7537 *
7538 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7539 * @param u16FSW The FSW from the current instruction.
7540 */
7541IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7542{
7543 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7544 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7545 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7546 iemFpuMaybePopOne(pFpuCtx);
7547 iemFpuMaybePopOne(pFpuCtx);
7548}
7549
7550
7551/**
7552 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7553 *
7554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7555 * @param u16FSW The FSW from the current instruction.
7556 * @param iEffSeg The effective memory operand selector register.
7557 * @param GCPtrEff The effective memory operand offset.
7558 */
7559IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7560{
7561 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7562 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7563 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7564 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7565 iemFpuMaybePopOne(pFpuCtx);
7566}
7567
7568
7569/**
7570 * Worker routine for raising an FPU stack underflow exception.
7571 *
7572 * @param pFpuCtx The FPU context.
7573 * @param iStReg The stack register being accessed.
7574 */
7575IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7576{
7577 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7578 if (pFpuCtx->FCW & X86_FCW_IM)
7579 {
7580 /* Masked underflow. */
7581 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7582 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7583 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7584 if (iStReg != UINT8_MAX)
7585 {
7586 pFpuCtx->FTW |= RT_BIT(iReg);
7587 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7588 }
7589 }
7590 else
7591 {
7592 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7593 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7594 }
7595}
7596
7597
7598/**
7599 * Raises a FPU stack underflow exception.
7600 *
7601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7602 * @param iStReg The destination register that should be loaded
7603 * with QNaN if \#IS is not masked. Specify
7604 * UINT8_MAX if none (like for fcom).
7605 */
7606DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7607{
7608 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7609 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7610 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7611}
7612
7613
7614DECL_NO_INLINE(IEM_STATIC, void)
7615iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7616{
7617 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7618 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7619 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7620 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7621}
7622
7623
7624DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7625{
7626 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7627 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7628 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7629 iemFpuMaybePopOne(pFpuCtx);
7630}
7631
7632
7633DECL_NO_INLINE(IEM_STATIC, void)
7634iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7635{
7636 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7637 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7638 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7639 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7640 iemFpuMaybePopOne(pFpuCtx);
7641}
7642
7643
7644DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7645{
7646 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7647 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7648 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7649 iemFpuMaybePopOne(pFpuCtx);
7650 iemFpuMaybePopOne(pFpuCtx);
7651}
7652
7653
7654DECL_NO_INLINE(IEM_STATIC, void)
7655iemFpuStackPushUnderflow(PVMCPU pVCpu)
7656{
7657 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7658 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7659
7660 if (pFpuCtx->FCW & X86_FCW_IM)
7661 {
7662        /* Masked underflow - Push QNaN. */
7663 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7664 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7665 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7666 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7667 pFpuCtx->FTW |= RT_BIT(iNewTop);
7668 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7669 iemFpuRotateStackPush(pFpuCtx);
7670 }
7671 else
7672 {
7673 /* Exception pending - don't change TOP or the register stack. */
7674 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7675 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7676 }
7677}
7678
7679
7680DECL_NO_INLINE(IEM_STATIC, void)
7681iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7682{
7683 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7684 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7685
7686 if (pFpuCtx->FCW & X86_FCW_IM)
7687 {
7688        /* Masked underflow - Push QNaN. */
7689 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7690 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7691 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7692 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7693 pFpuCtx->FTW |= RT_BIT(iNewTop);
7694 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7695 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7696 iemFpuRotateStackPush(pFpuCtx);
7697 }
7698 else
7699 {
7700 /* Exception pending - don't change TOP or the register stack. */
7701 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7702 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7703 }
7704}
7705
7706
7707/**
7708 * Worker routine for raising an FPU stack overflow exception on a push.
7709 *
7710 * @param pFpuCtx The FPU context.
7711 */
7712IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7713{
7714 if (pFpuCtx->FCW & X86_FCW_IM)
7715 {
7716 /* Masked overflow. */
7717 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7718 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7719 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7720 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7721 pFpuCtx->FTW |= RT_BIT(iNewTop);
7722 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7723 iemFpuRotateStackPush(pFpuCtx);
7724 }
7725 else
7726 {
7727 /* Exception pending - don't change TOP or the register stack. */
7728 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7729 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7730 }
7731}
7732
7733
7734/**
7735 * Raises a FPU stack overflow exception on a push.
7736 *
7737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7738 */
7739DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7740{
7741 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7742 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7743 iemFpuStackPushOverflowOnly(pFpuCtx);
7744}
7745
7746
7747/**
7748 * Raises a FPU stack overflow exception on a push with a memory operand.
7749 *
7750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7751 * @param iEffSeg The effective memory operand selector register.
7752 * @param GCPtrEff The effective memory operand offset.
7753 */
7754DECL_NO_INLINE(IEM_STATIC, void)
7755iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7756{
7757 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7758 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7759 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7760 iemFpuStackPushOverflowOnly(pFpuCtx);
7761}
7762
7763
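/**
 * Checks whether the given FPU stack register is in use (not tagged empty).
 *
 * @returns VINF_SUCCESS if the register is in use, VERR_NOT_FOUND if empty.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   iStReg              The stack register index relative to TOP.
 */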
7764IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7765{
7766 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7767 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7768 if (pFpuCtx->FTW & RT_BIT(iReg))
7769 return VINF_SUCCESS;
7770 return VERR_NOT_FOUND;
7771}
7772
7773
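/**
 * Checks whether the given FPU stack register is in use and returns a
 * read-only reference to its 80-bit value if it is.
 *
 * @returns VINF_SUCCESS if the register is in use, VERR_NOT_FOUND if empty.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   iStReg              The stack register index relative to TOP.
 * @param   ppRef               Where to return the register reference on success.
 */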
7774IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7775{
7776 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7777 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7778 if (pFpuCtx->FTW & RT_BIT(iReg))
7779 {
7780 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7781 return VINF_SUCCESS;
7782 }
7783 return VERR_NOT_FOUND;
7784}
7785
7786
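/**
 * Checks whether two FPU stack registers are both in use and returns
 * read-only references to their 80-bit values if they are.
 *
 * @returns VINF_SUCCESS if both registers are in use, VERR_NOT_FOUND otherwise.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   iStReg0             The first stack register index relative to TOP.
 * @param   ppRef0              Where to return the ST(iStReg0) reference on success.
 * @param   iStReg1             The second stack register index relative to TOP.
 * @param   ppRef1              Where to return the ST(iStReg1) reference on success.
 */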
7787IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7788 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7789{
7790 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7791 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7792 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7793 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7794 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7795 {
7796 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7797 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7798 return VINF_SUCCESS;
7799 }
7800 return VERR_NOT_FOUND;
7801}
7802
7803
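/**
 * Checks whether two FPU stack registers are both in use and returns a
 * read-only reference to the first one only.
 *
 * @returns VINF_SUCCESS if both registers are in use, VERR_NOT_FOUND otherwise.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   iStReg0             The first stack register index relative to TOP.
 * @param   ppRef0              Where to return the ST(iStReg0) reference on success.
 * @param   iStReg1             The second stack register index relative to TOP.
 */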
7804IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7805{
7806 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7807 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7808 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7809 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7810 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7811 {
7812 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7813 return VINF_SUCCESS;
7814 }
7815 return VERR_NOT_FOUND;
7816}
7817
7818
7819/**
7820 * Updates the FPU exception status after FCW is changed.
7821 *
7822 * @param pFpuCtx The FPU context.
7823 */
7824IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7825{
7826 uint16_t u16Fsw = pFpuCtx->FSW;
7827 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7828 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7829 else
7830 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7831 pFpuCtx->FSW = u16Fsw;
7832}
7833
7834
7835/**
7836 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7837 *
7838 * @returns The full FTW.
7839 * @param pFpuCtx The FPU context.
7840 */
7841IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7842{
7843 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7844 uint16_t u16Ftw = 0;
7845 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7846 for (unsigned iSt = 0; iSt < 8; iSt++)
7847 {
7848 unsigned const iReg = (iSt + iTop) & 7;
7849 if (!(u8Ftw & RT_BIT(iReg)))
7850 u16Ftw |= 3 << (iReg * 2); /* empty */
7851 else
7852 {
7853 uint16_t uTag;
7854 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7855 if (pr80Reg->s.uExponent == 0x7fff)
7856 uTag = 2; /* Exponent is all 1's => Special. */
7857 else if (pr80Reg->s.uExponent == 0x0000)
7858 {
7859 if (pr80Reg->s.u64Mantissa == 0x0000)
7860 uTag = 1; /* All bits are zero => Zero. */
7861 else
7862 uTag = 2; /* Must be special. */
7863 }
7864 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7865 uTag = 0; /* Valid. */
7866 else
7867 uTag = 2; /* Must be special. */
7868
7869             u16Ftw |= uTag << (iReg * 2);
7870 }
7871 }
7872
7873 return u16Ftw;
7874}
7875
7876
7877/**
7878 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7879 *
7880 * @returns The compressed FTW.
7881 * @param u16FullFtw The full FTW to convert.
7882 */
7883IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7884{
7885 uint8_t u8Ftw = 0;
7886 for (unsigned i = 0; i < 8; i++)
7887 {
7888 if ((u16FullFtw & 3) != 3 /*empty*/)
7889 u8Ftw |= RT_BIT(i);
7890 u16FullFtw >>= 2;
7891 }
7892
7893 return u8Ftw;
7894}
7895
7896/** @} */
7897
7898
7899/** @name Memory access.
7900 *
7901 * @{
7902 */
7903
7904
7905/**
7906 * Updates the IEMCPU::cbWritten counter if applicable.
7907 *
7908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7909 * @param fAccess The access being accounted for.
7910 * @param cbMem The access size.
7911 */
7912DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7913{
7914 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7915 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7916 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7917}
7918
7919
7920/**
7921 * Checks if the given segment can be written to, raising the appropriate
7922 * exception if not.
7923 *
7924 * @returns VBox strict status code.
7925 *
7926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7927 * @param pHid Pointer to the hidden register.
7928 * @param iSegReg The register number.
7929 * @param pu64BaseAddr Where to return the base address to use for the
7930 * segment. (In 64-bit code it may differ from the
7931 * base in the hidden segment.)
7932 */
7933IEM_STATIC VBOXSTRICTRC
7934iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7935{
7936 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7937
7938 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7939 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7940 else
7941 {
7942 if (!pHid->Attr.n.u1Present)
7943 {
7944 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7945 AssertRelease(uSel == 0);
7946 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7947 return iemRaiseGeneralProtectionFault0(pVCpu);
7948 }
7949
7950 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7951 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7952 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7953 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7954 *pu64BaseAddr = pHid->u64Base;
7955 }
7956 return VINF_SUCCESS;
7957}
7958
7959
7960/**
7961 * Checks if the given segment can be read from, raising the appropriate
7962 * exception if not.
7963 *
7964 * @returns VBox strict status code.
7965 *
7966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7967 * @param pHid Pointer to the hidden register.
7968 * @param iSegReg The register number.
7969 * @param pu64BaseAddr Where to return the base address to use for the
7970 * segment. (In 64-bit code it may differ from the
7971 * base in the hidden segment.)
7972 */
7973IEM_STATIC VBOXSTRICTRC
7974iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7975{
7976 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7977
7978 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7979 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7980 else
7981 {
7982 if (!pHid->Attr.n.u1Present)
7983 {
7984 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7985 AssertRelease(uSel == 0);
7986 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7987 return iemRaiseGeneralProtectionFault0(pVCpu);
7988 }
7989
7990 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7991 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7992 *pu64BaseAddr = pHid->u64Base;
7993 }
7994 return VINF_SUCCESS;
7995}
7996
7997
7998/**
7999 * Applies the segment limit, base and attributes.
8000 *
8001 * This may raise a \#GP or \#SS.
8002 *
8003 * @returns VBox strict status code.
8004 *
8005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8006 * @param fAccess The kind of access which is being performed.
8007 * @param iSegReg The index of the segment register to apply.
8008 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8009 * TSS, ++).
8010 * @param cbMem The access size.
8011 * @param pGCPtrMem Pointer to the guest memory address to apply
8012 * segmentation to. Input and output parameter.
8013 */
8014IEM_STATIC VBOXSTRICTRC
8015iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8016{
8017 if (iSegReg == UINT8_MAX)
8018 return VINF_SUCCESS;
8019
8020 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8021 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8022 switch (pVCpu->iem.s.enmCpuMode)
8023 {
8024 case IEMMODE_16BIT:
8025 case IEMMODE_32BIT:
8026 {
8027 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8028 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8029
8030 if ( pSel->Attr.n.u1Present
8031 && !pSel->Attr.n.u1Unusable)
8032 {
8033 Assert(pSel->Attr.n.u1DescType);
8034 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8035 {
8036 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8037 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8038 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8039
8040 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8041 {
8042 /** @todo CPL check. */
8043 }
8044
8045 /*
8046 * There are two kinds of data selectors, normal and expand down.
8047 */
8048 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8049 {
8050 if ( GCPtrFirst32 > pSel->u32Limit
8051 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8052 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8053 }
8054 else
8055 {
8056 /*
8057 * The upper boundary is defined by the B bit, not the G bit!
8058 */
8059 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8060 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8061 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8062 }
8063 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8064 }
8065 else
8066 {
8067
8068 /*
8069                  * Code selectors can usually be used to read through; writing is
8070 * only permitted in real and V8086 mode.
8071 */
8072 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8073 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8074 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8075 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8076 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8077
8078 if ( GCPtrFirst32 > pSel->u32Limit
8079 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8080 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8081
8082 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8083 {
8084 /** @todo CPL check. */
8085 }
8086
8087 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8088 }
8089 }
8090 else
8091 return iemRaiseGeneralProtectionFault0(pVCpu);
8092 return VINF_SUCCESS;
8093 }
8094
8095 case IEMMODE_64BIT:
8096 {
8097 RTGCPTR GCPtrMem = *pGCPtrMem;
8098 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8099 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8100
8101 Assert(cbMem >= 1);
8102 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8103 return VINF_SUCCESS;
8104 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8105 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8106 return iemRaiseGeneralProtectionFault0(pVCpu);
8107 }
8108
8109 default:
8110 AssertFailedReturn(VERR_IEM_IPE_7);
8111 }
8112}
8113
8114
8115/**
8116 * Translates a virtual address to a physical address and checks if we
8117 * can access the page as specified.
8118 *
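 * @returns VBox strict status code.
 *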
8119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8120 * @param GCPtrMem The virtual address.
8121 * @param fAccess The intended access.
8122 * @param pGCPhysMem Where to return the physical address.
8123 */
8124IEM_STATIC VBOXSTRICTRC
8125iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8126{
8127 /** @todo Need a different PGM interface here. We're currently using
8128 * generic / REM interfaces. this won't cut it for R0 & RC. */
8129 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8130 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8131 RTGCPHYS GCPhys;
8132 uint64_t fFlags;
8133 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8134 if (RT_FAILURE(rc))
8135 {
8136 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8137 /** @todo Check unassigned memory in unpaged mode. */
8138 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8139 *pGCPhysMem = NIL_RTGCPHYS;
8140 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8141 }
8142
8143 /* If the page is writable and does not have the no-exec bit set, all
8144 access is allowed. Otherwise we'll have to check more carefully... */
8145 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8146 {
8147 /* Write to read only memory? */
8148 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8149 && !(fFlags & X86_PTE_RW)
8150 && ( (pVCpu->iem.s.uCpl == 3
8151 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8152 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8153 {
8154 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8155 *pGCPhysMem = NIL_RTGCPHYS;
8156 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8157 }
8158
8159 /* Kernel memory accessed by userland? */
8160 if ( !(fFlags & X86_PTE_US)
8161 && pVCpu->iem.s.uCpl == 3
8162 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8163 {
8164 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8165 *pGCPhysMem = NIL_RTGCPHYS;
8166 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8167 }
8168
8169 /* Executing non-executable memory? */
8170 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8171 && (fFlags & X86_PTE_PAE_NX)
8172 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8173 {
8174 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8175 *pGCPhysMem = NIL_RTGCPHYS;
8176 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8177 VERR_ACCESS_DENIED);
8178 }
8179 }
8180
8181 /*
8182 * Set the dirty / access flags.
8183      * ASSUMES this is set when the address is translated rather than on commit...
8184 */
8185 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8186 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8187 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8188 {
8189 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8190 AssertRC(rc2);
8191 }
8192
8193 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8194 *pGCPhysMem = GCPhys;
8195 return VINF_SUCCESS;
8196}
8197
8198
8199
8200/**
8201 * Maps a physical page.
8202 *
8203 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8205 * @param GCPhysMem The physical address.
8206 * @param fAccess The intended access.
8207 * @param ppvMem Where to return the mapping address.
8208 * @param pLock The PGM lock.
8209 */
8210IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8211{
8212#ifdef IEM_LOG_MEMORY_WRITES
8213 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8214 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8215#endif
8216
8217     /** @todo This API may require some improvement later. A private deal with PGM
8218      * regarding locking and unlocking needs to be struck. A couple of TLBs
8219 * living in PGM, but with publicly accessible inlined access methods
8220 * could perhaps be an even better solution. */
8221 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8222 GCPhysMem,
8223 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8224 pVCpu->iem.s.fBypassHandlers,
8225 ppvMem,
8226 pLock);
8227 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8228 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8229
8230 return rc;
8231}
8232
8233
8234/**
8235  * Unmaps a page previously mapped by iemMemPageMap.
8236 *
8237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8238 * @param GCPhysMem The physical address.
8239 * @param fAccess The intended access.
8240 * @param pvMem What iemMemPageMap returned.
8241 * @param pLock The PGM lock.
8242 */
8243DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8244{
8245 NOREF(pVCpu);
8246 NOREF(GCPhysMem);
8247 NOREF(fAccess);
8248 NOREF(pvMem);
8249 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8250}
8251
8252
8253/**
8254 * Looks up a memory mapping entry.
8255 *
8256 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8258 * @param pvMem The memory address.
8259  * @param   fAccess             The access flags of the mapping to look up.
8260 */
8261DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8262{
8263 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8264 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8265 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8266 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8267 return 0;
8268 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8269 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8270 return 1;
8271 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8272 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8273 return 2;
8274 return VERR_NOT_FOUND;
8275}
8276
8277
8278/**
8279 * Finds a free memmap entry when using iNextMapping doesn't work.
8280 *
8281 * @returns Memory mapping index, 1024 on failure.
8282 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8283 */
8284IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8285{
8286 /*
8287 * The easy case.
8288 */
8289 if (pVCpu->iem.s.cActiveMappings == 0)
8290 {
8291 pVCpu->iem.s.iNextMapping = 1;
8292 return 0;
8293 }
8294
8295 /* There should be enough mappings for all instructions. */
8296 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8297
8298 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8299 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8300 return i;
8301
8302 AssertFailedReturn(1024);
8303}
8304
8305
8306/**
8307 * Commits a bounce buffer that needs writing back and unmaps it.
8308 *
8309 * @returns Strict VBox status code.
8310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8311 * @param iMemMap The index of the buffer to commit.
8312 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8313 * Always false in ring-3, obviously.
8314 */
8315IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8316{
8317 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8318 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8319#ifdef IN_RING3
8320 Assert(!fPostponeFail);
8321 RT_NOREF_PV(fPostponeFail);
8322#endif
8323
8324 /*
8325 * Do the writing.
8326 */
8327 PVM pVM = pVCpu->CTX_SUFF(pVM);
8328 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8329 {
8330 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8331 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8332 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8333 if (!pVCpu->iem.s.fBypassHandlers)
8334 {
8335 /*
8336 * Carefully and efficiently dealing with access handler return
8337              * codes makes this a little bloated.
8338 */
8339 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8340 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8341 pbBuf,
8342 cbFirst,
8343 PGMACCESSORIGIN_IEM);
8344 if (rcStrict == VINF_SUCCESS)
8345 {
8346 if (cbSecond)
8347 {
8348 rcStrict = PGMPhysWrite(pVM,
8349 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8350 pbBuf + cbFirst,
8351 cbSecond,
8352 PGMACCESSORIGIN_IEM);
8353 if (rcStrict == VINF_SUCCESS)
8354 { /* nothing */ }
8355 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8356 {
8357 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8358 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8359 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8360 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8361 }
8362#ifndef IN_RING3
8363 else if (fPostponeFail)
8364 {
8365 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8366 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8367 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8368 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8369 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8370 return iemSetPassUpStatus(pVCpu, rcStrict);
8371 }
8372#endif
8373 else
8374 {
8375 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8376 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8377 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8378 return rcStrict;
8379 }
8380 }
8381 }
8382 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8383 {
8384 if (!cbSecond)
8385 {
8386 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8387 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8388 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8389 }
8390 else
8391 {
8392 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8393 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8394 pbBuf + cbFirst,
8395 cbSecond,
8396 PGMACCESSORIGIN_IEM);
8397 if (rcStrict2 == VINF_SUCCESS)
8398 {
8399 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8400 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8401 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8402 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8403 }
8404 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8405 {
8406 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8407 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8408 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8409 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8410 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8411 }
8412#ifndef IN_RING3
8413 else if (fPostponeFail)
8414 {
8415 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8416 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8417 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8418 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8419 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8420 return iemSetPassUpStatus(pVCpu, rcStrict);
8421 }
8422#endif
8423 else
8424 {
8425 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8426 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8427 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8428 return rcStrict2;
8429 }
8430 }
8431 }
8432#ifndef IN_RING3
8433 else if (fPostponeFail)
8434 {
8435 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8436 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8437 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8438 if (!cbSecond)
8439 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8440 else
8441 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8442 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8443 return iemSetPassUpStatus(pVCpu, rcStrict);
8444 }
8445#endif
8446 else
8447 {
8448 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8449 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8450 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8451 return rcStrict;
8452 }
8453 }
8454 else
8455 {
8456 /*
8457 * No access handlers, much simpler.
8458 */
8459 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8460 if (RT_SUCCESS(rc))
8461 {
8462 if (cbSecond)
8463 {
8464 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8465 if (RT_SUCCESS(rc))
8466 { /* likely */ }
8467 else
8468 {
8469 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8470 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8471 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8472 return rc;
8473 }
8474 }
8475 }
8476 else
8477 {
8478 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8479 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8480 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8481 return rc;
8482 }
8483 }
8484 }
8485
8486#if defined(IEM_LOG_MEMORY_WRITES)
8487 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8488 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8489 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8490 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8491 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8492 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8493
8494 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8495 g_cbIemWrote = cbWrote;
8496 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8497#endif
8498
8499 /*
8500 * Free the mapping entry.
8501 */
8502 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8503 Assert(pVCpu->iem.s.cActiveMappings != 0);
8504 pVCpu->iem.s.cActiveMappings--;
8505 return VINF_SUCCESS;
8506}
8507
8508
8509/**
8510 * iemMemMap worker that deals with a request crossing pages.
8511 */
8512IEM_STATIC VBOXSTRICTRC
8513iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8514{
8515 /*
8516 * Do the address translations.
8517 */
8518 RTGCPHYS GCPhysFirst;
8519 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8520 if (rcStrict != VINF_SUCCESS)
8521 return rcStrict;
8522
8523 RTGCPHYS GCPhysSecond;
8524 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8525 fAccess, &GCPhysSecond);
8526 if (rcStrict != VINF_SUCCESS)
8527 return rcStrict;
8528 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8529
8530 PVM pVM = pVCpu->CTX_SUFF(pVM);
8531
8532 /*
8533 * Read in the current memory content if it's a read, execute or partial
8534 * write access.
8535 */
8536 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8537 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8538 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8539
8540 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8541 {
8542 if (!pVCpu->iem.s.fBypassHandlers)
8543 {
8544 /*
8545 * Must carefully deal with access handler status codes here,
8546              * which makes the code a bit bloated.
8547 */
8548 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8549 if (rcStrict == VINF_SUCCESS)
8550 {
8551 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8552 if (rcStrict == VINF_SUCCESS)
8553 { /*likely */ }
8554 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8555 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8556 else
8557 {
8558 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8559 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8560 return rcStrict;
8561 }
8562 }
8563 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8564 {
8565 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8566 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8567 {
8568 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8569 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8570 }
8571 else
8572 {
8573 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8574                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8575 return rcStrict2;
8576 }
8577 }
8578 else
8579 {
8580 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8581 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8582 return rcStrict;
8583 }
8584 }
8585 else
8586 {
8587 /*
8588              * No informational status codes here, much more straightforward.
8589 */
8590 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8591 if (RT_SUCCESS(rc))
8592 {
8593 Assert(rc == VINF_SUCCESS);
8594 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8595 if (RT_SUCCESS(rc))
8596 Assert(rc == VINF_SUCCESS);
8597 else
8598 {
8599 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8600 return rc;
8601 }
8602 }
8603 else
8604 {
8605 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8606 return rc;
8607 }
8608 }
8609 }
8610#ifdef VBOX_STRICT
8611 else
8612 memset(pbBuf, 0xcc, cbMem);
8613 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8614 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8615#endif
8616
8617 /*
8618 * Commit the bounce buffer entry.
8619 */
8620 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8621 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8622 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8623 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8624 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8625 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8626 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8627 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8628 pVCpu->iem.s.cActiveMappings++;
8629
8630 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8631 *ppvMem = pbBuf;
8632 return VINF_SUCCESS;
8633}
8634
8635
8636/**
8637  * iemMemMap worker that deals with iemMemPageMap failures.
8638 */
8639IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8640 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8641{
8642 /*
8643 * Filter out conditions we can handle and the ones which shouldn't happen.
8644 */
8645 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8646 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8647 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8648 {
8649 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8650 return rcMap;
8651 }
8652 pVCpu->iem.s.cPotentialExits++;
8653
8654 /*
8655 * Read in the current memory content if it's a read, execute or partial
8656 * write access.
8657 */
8658 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8659 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8660 {
8661 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8662 memset(pbBuf, 0xff, cbMem);
8663 else
8664 {
8665 int rc;
8666 if (!pVCpu->iem.s.fBypassHandlers)
8667 {
8668 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8669 if (rcStrict == VINF_SUCCESS)
8670 { /* nothing */ }
8671 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8672 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8673 else
8674 {
8675 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8676 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8677 return rcStrict;
8678 }
8679 }
8680 else
8681 {
8682 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8683 if (RT_SUCCESS(rc))
8684 { /* likely */ }
8685 else
8686 {
8687 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8688 GCPhysFirst, rc));
8689 return rc;
8690 }
8691 }
8692 }
8693 }
8694 #ifdef VBOX_STRICT
8695     else
8696         memset(pbBuf, 0xcc, cbMem);
8697     if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8698         memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8699 #endif
8702
8703 /*
8704 * Commit the bounce buffer entry.
8705 */
8706 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8707 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8708 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8709 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8710 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8711 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8712 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8713 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8714 pVCpu->iem.s.cActiveMappings++;
8715
8716 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8717 *ppvMem = pbBuf;
8718 return VINF_SUCCESS;
8719}
8720
8721
8722
8723/**
8724 * Maps the specified guest memory for the given kind of access.
8725 *
8726  * This may use bounce buffering of the memory if it crosses a page
8727  * boundary or if an access handler is installed for any of it. Because
8728 * of lock prefix guarantees, we're in for some extra clutter when this
8729 * happens.
8730 *
8731 * This may raise a \#GP, \#SS, \#PF or \#AC.
8732 *
8733 * @returns VBox strict status code.
8734 *
8735 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8736 * @param ppvMem Where to return the pointer to the mapped
8737 * memory.
8738 * @param cbMem The number of bytes to map. This is usually 1,
8739 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8740 * string operations it can be up to a page.
8741 * @param iSegReg The index of the segment register to use for
8742 * this access. The base and limits are checked.
8743 * Use UINT8_MAX to indicate that no segmentation
8744 * is required (for IDT, GDT and LDT accesses).
8745 * @param GCPtrMem The address of the guest memory.
8746 * @param fAccess How the memory is being accessed. The
8747 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8748 * how to map the memory, while the
8749 * IEM_ACCESS_WHAT_XXX bit is used when raising
8750 * exceptions.
8751 */
8752IEM_STATIC VBOXSTRICTRC
8753iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8754{
8755 /*
8756 * Check the input and figure out which mapping entry to use.
8757 */
8758 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8759 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8760 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8761
8762 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8763 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8764 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8765 {
8766 iMemMap = iemMemMapFindFree(pVCpu);
8767 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8768 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8769 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8770 pVCpu->iem.s.aMemMappings[2].fAccess),
8771 VERR_IEM_IPE_9);
8772 }
8773
8774 /*
8775 * Map the memory, checking that we can actually access it. If something
8776 * slightly complicated happens, fall back on bounce buffering.
8777 */
8778 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8779 if (rcStrict != VINF_SUCCESS)
8780 return rcStrict;
8781
8782 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8783 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8784
8785 RTGCPHYS GCPhysFirst;
8786 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8787 if (rcStrict != VINF_SUCCESS)
8788 return rcStrict;
8789
8790 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8791 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8792 if (fAccess & IEM_ACCESS_TYPE_READ)
8793 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8794
8795 void *pvMem;
8796 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8797 if (rcStrict != VINF_SUCCESS)
8798 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8799
8800 /*
8801 * Fill in the mapping table entry.
8802 */
8803 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8804 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8805 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8806 pVCpu->iem.s.cActiveMappings++;
8807
8808 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8809 *ppvMem = pvMem;
8810 return VINF_SUCCESS;
8811}
8812
8813
8814/**
8815 * Commits the guest memory if bounce buffered and unmaps it.
8816 *
8817 * @returns Strict VBox status code.
8818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8819 * @param pvMem The mapping.
8820 * @param fAccess The kind of access.
8821 */
8822IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8823{
8824 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8825 AssertReturn(iMemMap >= 0, iMemMap);
8826
8827 /* If it's bounce buffered, we may need to write back the buffer. */
8828 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8829 {
8830 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8831 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8832 }
8833 /* Otherwise unlock it. */
8834 else
8835 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8836
8837 /* Free the entry. */
8838 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8839 Assert(pVCpu->iem.s.cActiveMappings != 0);
8840 pVCpu->iem.s.cActiveMappings--;
8841 return VINF_SUCCESS;
8842}
8843
8844#ifdef IEM_WITH_SETJMP
8845
8846/**
8847 * Maps the specified guest memory for the given kind of access, longjmp on
8848 * error.
8849 *
8850  * This may use bounce buffering of the memory if it crosses a page
8851  * boundary or if an access handler is installed for any of it. Because
8852 * of lock prefix guarantees, we're in for some extra clutter when this
8853 * happens.
8854 *
8855 * This may raise a \#GP, \#SS, \#PF or \#AC.
8856 *
8857 * @returns Pointer to the mapped memory.
8858 *
8859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8860 * @param cbMem The number of bytes to map. This is usually 1,
8861 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8862 * string operations it can be up to a page.
8863 * @param iSegReg The index of the segment register to use for
8864 * this access. The base and limits are checked.
8865 * Use UINT8_MAX to indicate that no segmentation
8866 * is required (for IDT, GDT and LDT accesses).
8867 * @param GCPtrMem The address of the guest memory.
8868 * @param fAccess How the memory is being accessed. The
8869 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8870 * how to map the memory, while the
8871 * IEM_ACCESS_WHAT_XXX bit is used when raising
8872 * exceptions.
8873 */
8874IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8875{
8876 /*
8877 * Check the input and figure out which mapping entry to use.
8878 */
8879 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8880 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8881 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8882
8883 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8884 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8885 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8886 {
8887 iMemMap = iemMemMapFindFree(pVCpu);
8888 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8889 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8890 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8891 pVCpu->iem.s.aMemMappings[2].fAccess),
8892 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8893 }
8894
8895 /*
8896 * Map the memory, checking that we can actually access it. If something
8897 * slightly complicated happens, fall back on bounce buffering.
8898 */
8899 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8900 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8901 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8902
8903 /* Crossing a page boundary? */
8904 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8905 { /* No (likely). */ }
8906 else
8907 {
8908 void *pvMem;
8909 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8910 if (rcStrict == VINF_SUCCESS)
8911 return pvMem;
8912 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8913 }
8914
8915 RTGCPHYS GCPhysFirst;
8916 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8917 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8918 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8919
8920 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8921 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8922 if (fAccess & IEM_ACCESS_TYPE_READ)
8923 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8924
8925 void *pvMem;
8926 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8927 if (rcStrict == VINF_SUCCESS)
8928 { /* likely */ }
8929 else
8930 {
8931 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8932 if (rcStrict == VINF_SUCCESS)
8933 return pvMem;
8934 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8935 }
8936
8937 /*
8938 * Fill in the mapping table entry.
8939 */
8940 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8941 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8942 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8943 pVCpu->iem.s.cActiveMappings++;
8944
8945 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8946 return pvMem;
8947}
8948
8949
8950/**
8951 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8952 *
8953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8954 * @param pvMem The mapping.
8955 * @param fAccess The kind of access.
8956 */
8957IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8958{
8959 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8960 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8961
8962 /* If it's bounce buffered, we may need to write back the buffer. */
8963 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8964 {
8965 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8966 {
8967 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8968 if (rcStrict == VINF_SUCCESS)
8969 return;
8970 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8971 }
8972 }
8973 /* Otherwise unlock it. */
8974 else
8975 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8976
8977 /* Free the entry. */
8978 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8979 Assert(pVCpu->iem.s.cActiveMappings != 0);
8980 pVCpu->iem.s.cActiveMappings--;
8981}
8982
8983#endif /* IEM_WITH_SETJMP */
8984
8985#ifndef IN_RING3
8986/**
8987  * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8988  * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
8989 *
8990 * Allows the instruction to be completed and retired, while the IEM user will
8991 * return to ring-3 immediately afterwards and do the postponed writes there.
8992 *
8993 * @returns VBox status code (no strict statuses). Caller must check
8994 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8995 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8996 * @param pvMem The mapping.
8997 * @param fAccess The kind of access.
8998 */
8999IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9000{
9001 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9002 AssertReturn(iMemMap >= 0, iMemMap);
9003
9004 /* If it's bounce buffered, we may need to write back the buffer. */
9005 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9006 {
9007 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9008 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9009 }
9010 /* Otherwise unlock it. */
9011 else
9012 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9013
9014 /* Free the entry. */
9015 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9016 Assert(pVCpu->iem.s.cActiveMappings != 0);
9017 pVCpu->iem.s.cActiveMappings--;
9018 return VINF_SUCCESS;
9019}
9020#endif
9021
9022
9023/**
9024  * Rolls back mappings, releasing page locks and such.
9025 *
9026 * The caller shall only call this after checking cActiveMappings.
9027 *
9029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9030 */
9031IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9032{
9033 Assert(pVCpu->iem.s.cActiveMappings > 0);
9034
9035 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9036 while (iMemMap-- > 0)
9037 {
9038 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9039 if (fAccess != IEM_ACCESS_INVALID)
9040 {
9041 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9042 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9043 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9044 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9045 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9046 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9047 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9048 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9049 pVCpu->iem.s.cActiveMappings--;
9050 }
9051 }
9052}
9053
9054
9055/**
9056 * Fetches a data byte.
9057 *
9058 * @returns Strict VBox status code.
9059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9060 * @param pu8Dst Where to return the byte.
9061 * @param iSegReg The index of the segment register to use for
9062 * this access. The base and limits are checked.
9063 * @param GCPtrMem The address of the guest memory.
9064 */
9065IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9066{
9067 /* The lazy approach for now... */
9068 uint8_t const *pu8Src;
9069 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9070 if (rc == VINF_SUCCESS)
9071 {
9072 *pu8Dst = *pu8Src;
9073 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9074 }
9075 return rc;
9076}
9077
9078
9079#ifdef IEM_WITH_SETJMP
9080/**
9081 * Fetches a data byte, longjmp on error.
9082 *
9083 * @returns The byte.
9084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9085 * @param iSegReg The index of the segment register to use for
9086 * this access. The base and limits are checked.
9087 * @param GCPtrMem The address of the guest memory.
9088 */
9089DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9090{
9091 /* The lazy approach for now... */
9092 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9093 uint8_t const bRet = *pu8Src;
9094 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9095 return bRet;
9096}
9097#endif /* IEM_WITH_SETJMP */
9098
9099
9100/**
9101 * Fetches a data word.
9102 *
9103 * @returns Strict VBox status code.
9104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9105 * @param pu16Dst Where to return the word.
9106 * @param iSegReg The index of the segment register to use for
9107 * this access. The base and limits are checked.
9108 * @param GCPtrMem The address of the guest memory.
9109 */
9110IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9111{
9112 /* The lazy approach for now... */
9113 uint16_t const *pu16Src;
9114 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9115 if (rc == VINF_SUCCESS)
9116 {
9117 *pu16Dst = *pu16Src;
9118 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9119 }
9120 return rc;
9121}
9122
9123
9124#ifdef IEM_WITH_SETJMP
9125/**
9126 * Fetches a data word, longjmp on error.
9127 *
9128  * @returns The word.
9129 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9130 * @param iSegReg The index of the segment register to use for
9131 * this access. The base and limits are checked.
9132 * @param GCPtrMem The address of the guest memory.
9133 */
9134DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9135{
9136 /* The lazy approach for now... */
9137 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9138 uint16_t const u16Ret = *pu16Src;
9139 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9140 return u16Ret;
9141}
9142#endif
9143
9144
9145/**
9146 * Fetches a data dword.
9147 *
9148 * @returns Strict VBox status code.
9149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9150 * @param pu32Dst Where to return the dword.
9151 * @param iSegReg The index of the segment register to use for
9152 * this access. The base and limits are checked.
9153 * @param GCPtrMem The address of the guest memory.
9154 */
9155IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9156{
9157 /* The lazy approach for now... */
9158 uint32_t const *pu32Src;
9159 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9160 if (rc == VINF_SUCCESS)
9161 {
9162 *pu32Dst = *pu32Src;
9163 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9164 }
9165 return rc;
9166}
9167
9168
9169#ifdef IEM_WITH_SETJMP
9170
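/**
 * Applies segmentation and checks the limits for a data read access, longjmp
 * on error.
 *
 * @returns The flat address of the access (segment base applied).
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg             The index of the segment register to use for this access.
 * @param   cbMem               The number of bytes to access.
 * @param   GCPtrMem            The address of the guest memory.
 */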
9171IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9172{
9173 Assert(cbMem >= 1);
9174 Assert(iSegReg < X86_SREG_COUNT);
9175
9176 /*
9177 * 64-bit mode is simpler.
9178 */
9179 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9180 {
9181 if (iSegReg >= X86_SREG_FS)
9182 {
9183 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9184 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9185 GCPtrMem += pSel->u64Base;
9186 }
9187
9188 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9189 return GCPtrMem;
9190 }
9191 /*
9192 * 16-bit and 32-bit segmentation.
9193 */
9194 else
9195 {
9196 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9197 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9198 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9199 == X86DESCATTR_P /* data, expand up */
9200 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9201 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9202 {
9203             /* expand up */
9204             uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9205             if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
9206                           && GCPtrLast32 >= (uint32_t)GCPtrMem))
9207                 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9208 }
9209 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9210 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9211 {
9212 /* expand down */
9213 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9214 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9215 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9216 && GCPtrLast32 > (uint32_t)GCPtrMem))
9217 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9218 }
9219 else
9220 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9221 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9222 }
9223 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9224}
9225
9226
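/**
 * Applies segmentation and checks the limits for a data write access, longjmp
 * on error.
 *
 * @returns The flat address of the access (segment base applied).
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg             The index of the segment register to use for this access.
 * @param   cbMem               The number of bytes to access.
 * @param   GCPtrMem            The address of the guest memory.
 */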
9227IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9228{
9229 Assert(cbMem >= 1);
9230 Assert(iSegReg < X86_SREG_COUNT);
9231
9232 /*
9233 * 64-bit mode is simpler.
9234 */
9235 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9236 {
9237 if (iSegReg >= X86_SREG_FS)
9238 {
9239 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9240 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9241 GCPtrMem += pSel->u64Base;
9242 }
9243
9244 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9245 return GCPtrMem;
9246 }
9247 /*
9248 * 16-bit and 32-bit segmentation.
9249 */
9250 else
9251 {
9252 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9253 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9254 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9255 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9256 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9257 {
9258             /* expand up */
9259             uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9260             if (RT_LIKELY(   GCPtrLast32 <= pSel->u32Limit
9261                           && GCPtrLast32 >= (uint32_t)GCPtrMem))
9262                 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9263 }
9264         else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9265 {
9266 /* expand down */
9267 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9268 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9269 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9270 && GCPtrLast32 > (uint32_t)GCPtrMem))
9271 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9272 }
9273 else
9274 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9275 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9276 }
9277 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9278}
9279
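/*
 * A minimal standalone sketch of the expand-up vs. expand-down limit checks
 * performed by the two helpers above (illustration only, never compiled; the
 * sketch* names are hypothetical and not part of this file).  For an
 * expand-up data segment the last byte of the access must not exceed the
 * limit, while for an expand-down segment the whole access must lie strictly
 * above the limit and below the 64 KB or 4 GB ceiling selected by the D/B bit.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>

static bool sketchFitsExpandUp(uint32_t off, uint32_t cb, uint32_t uLimit)
{
    uint32_t const offLast = off + cb - 1;      /* last byte accessed */
    return offLast >= off                       /* no 32-bit wrap-around */
        && offLast <= uLimit;                   /* valid offsets are 0..limit */
}

static bool sketchFitsExpandDown(uint32_t off, uint32_t cb, uint32_t uLimit, bool fDefBig)
{
    uint32_t const uCeiling = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    uint32_t const offLast  = off + cb - 1;     /* last byte accessed */
    return off > uLimit                         /* valid offsets start at limit + 1 ... */
        && offLast >= off                       /* no 32-bit wrap-around */
        && offLast <= uCeiling;                 /* ... and end at the D/B dependent ceiling */
}
#endif
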
9280
9281/**
9282 * Fetches a data dword, longjmp on error, fallback/safe version.
9283 *
9284 * @returns The dword
9285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9286 * @param iSegReg The index of the segment register to use for
9287 * this access. The base and limits are checked.
9288 * @param GCPtrMem The address of the guest memory.
9289 */
9290IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9291{
9292 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9293 uint32_t const u32Ret = *pu32Src;
9294 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9295 return u32Ret;
9296}
9297
9298
9299/**
9300 * Fetches a data dword, longjmp on error.
9301 *
9302 * @returns The dword
9303 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9304 * @param iSegReg The index of the segment register to use for
9305 * this access. The base and limits are checked.
9306 * @param GCPtrMem The address of the guest memory.
9307 */
9308DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9309{
9310# ifdef IEM_WITH_DATA_TLB
9311 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9312 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9313 {
9314 /// @todo more later.
9315 }
9316
9317 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9318# else
9319 /* The lazy approach. */
9320 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9321 uint32_t const u32Ret = *pu32Src;
9322 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9323 return u32Ret;
9324# endif
9325}
9326#endif
9327
9328
9329#ifdef SOME_UNUSED_FUNCTION
9330/**
9331 * Fetches a data dword and sign extends it to a qword.
9332 *
9333 * @returns Strict VBox status code.
9334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9335 * @param pu64Dst Where to return the sign extended value.
9336 * @param iSegReg The index of the segment register to use for
9337 * this access. The base and limits are checked.
9338 * @param GCPtrMem The address of the guest memory.
9339 */
9340IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9341{
9342 /* The lazy approach for now... */
9343 int32_t const *pi32Src;
9344 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9345 if (rc == VINF_SUCCESS)
9346 {
9347 *pu64Dst = *pi32Src;
9348 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9349 }
9350#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9351 else
9352 *pu64Dst = 0;
9353#endif
9354 return rc;
9355}
9356#endif
9357
9358
9359/**
9360 * Fetches a data qword.
9361 *
9362 * @returns Strict VBox status code.
9363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9364 * @param pu64Dst Where to return the qword.
9365 * @param iSegReg The index of the segment register to use for
9366 * this access. The base and limits are checked.
9367 * @param GCPtrMem The address of the guest memory.
9368 */
9369IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9370{
9371 /* The lazy approach for now... */
9372 uint64_t const *pu64Src;
9373 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9374 if (rc == VINF_SUCCESS)
9375 {
9376 *pu64Dst = *pu64Src;
9377 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9378 }
9379 return rc;
9380}
9381
9382
9383#ifdef IEM_WITH_SETJMP
9384/**
9385 * Fetches a data qword, longjmp on error.
9386 *
9387 * @returns The qword.
9388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9389 * @param iSegReg The index of the segment register to use for
9390 * this access. The base and limits are checked.
9391 * @param GCPtrMem The address of the guest memory.
9392 */
9393DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9394{
9395 /* The lazy approach for now... */
9396 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9397 uint64_t const u64Ret = *pu64Src;
9398 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9399 return u64Ret;
9400}
9401#endif
9402
9403
9404/**
9405  * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9406 *
9407 * @returns Strict VBox status code.
9408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9409 * @param pu64Dst Where to return the qword.
9410 * @param iSegReg The index of the segment register to use for
9411 * this access. The base and limits are checked.
9412 * @param GCPtrMem The address of the guest memory.
9413 */
9414IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9415{
9416 /* The lazy approach for now... */
9417 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9418 if (RT_UNLIKELY(GCPtrMem & 15))
9419 return iemRaiseGeneralProtectionFault0(pVCpu);
9420
9421 uint64_t const *pu64Src;
9422 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9423 if (rc == VINF_SUCCESS)
9424 {
9425 *pu64Dst = *pu64Src;
9426 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9427 }
9428 return rc;
9429}
9430
9431
9432#ifdef IEM_WITH_SETJMP
9433/**
9434  * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9435 *
9436 * @returns The qword.
9437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9438 * @param iSegReg The index of the segment register to use for
9439 * this access. The base and limits are checked.
9440 * @param GCPtrMem The address of the guest memory.
9441 */
9442DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9443{
9444 /* The lazy approach for now... */
9445 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9446 if (RT_LIKELY(!(GCPtrMem & 15)))
9447 {
9448 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9449 uint64_t const u64Ret = *pu64Src;
9450 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9451 return u64Ret;
9452 }
9453
9454 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9455 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9456}
9457#endif
9458
9459
9460/**
9461 * Fetches a data tword.
9462 *
9463 * @returns Strict VBox status code.
9464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9465 * @param pr80Dst Where to return the tword.
9466 * @param iSegReg The index of the segment register to use for
9467 * this access. The base and limits are checked.
9468 * @param GCPtrMem The address of the guest memory.
9469 */
9470IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9471{
9472 /* The lazy approach for now... */
9473 PCRTFLOAT80U pr80Src;
9474 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9475 if (rc == VINF_SUCCESS)
9476 {
9477 *pr80Dst = *pr80Src;
9478 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9479 }
9480 return rc;
9481}
9482
9483
9484#ifdef IEM_WITH_SETJMP
9485/**
9486 * Fetches a data tword, longjmp on error.
9487 *
9488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9489 * @param pr80Dst Where to return the tword.
9490 * @param iSegReg The index of the segment register to use for
9491 * this access. The base and limits are checked.
9492 * @param GCPtrMem The address of the guest memory.
9493 */
9494DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9495{
9496 /* The lazy approach for now... */
9497 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9498 *pr80Dst = *pr80Src;
9499 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9500}
9501#endif
9502
9503
9504/**
9505 * Fetches a data dqword (double qword), generally SSE related.
9506 *
9507 * @returns Strict VBox status code.
9508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9509  * @param   pu128Dst            Where to return the dqword.
9510 * @param iSegReg The index of the segment register to use for
9511 * this access. The base and limits are checked.
9512 * @param GCPtrMem The address of the guest memory.
9513 */
9514IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9515{
9516 /* The lazy approach for now... */
9517 PCRTUINT128U pu128Src;
9518 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9519 if (rc == VINF_SUCCESS)
9520 {
9521 pu128Dst->au64[0] = pu128Src->au64[0];
9522 pu128Dst->au64[1] = pu128Src->au64[1];
9523 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9524 }
9525 return rc;
9526}
9527
9528
9529#ifdef IEM_WITH_SETJMP
9530/**
9531 * Fetches a data dqword (double qword), generally SSE related.
9532 *
9533 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9534  * @param   pu128Dst            Where to return the dqword.
9535 * @param iSegReg The index of the segment register to use for
9536 * this access. The base and limits are checked.
9537 * @param GCPtrMem The address of the guest memory.
9538 */
9539IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9540{
9541 /* The lazy approach for now... */
9542 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9543 pu128Dst->au64[0] = pu128Src->au64[0];
9544 pu128Dst->au64[1] = pu128Src->au64[1];
9545 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9546}
9547#endif
9548
9549
9550/**
9551 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9552 * related.
9553 *
9554 * Raises \#GP(0) if not aligned.
9555 *
9556 * @returns Strict VBox status code.
9557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9558  * @param   pu128Dst            Where to return the dqword.
9559 * @param iSegReg The index of the segment register to use for
9560 * this access. The base and limits are checked.
9561 * @param GCPtrMem The address of the guest memory.
9562 */
9563IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9564{
9565 /* The lazy approach for now... */
9566 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9567 if ( (GCPtrMem & 15)
9568 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9569 return iemRaiseGeneralProtectionFault0(pVCpu);
9570
9571 PCRTUINT128U pu128Src;
9572 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9573 if (rc == VINF_SUCCESS)
9574 {
9575 pu128Dst->au64[0] = pu128Src->au64[0];
9576 pu128Dst->au64[1] = pu128Src->au64[1];
9577 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9578 }
9579 return rc;
9580}
9581
9582
9583#ifdef IEM_WITH_SETJMP
9584/**
9585 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9586 * related, longjmp on error.
9587 *
9588 * Raises \#GP(0) if not aligned.
9589 *
9590 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9591  * @param   pu128Dst            Where to return the dqword.
9592 * @param iSegReg The index of the segment register to use for
9593 * this access. The base and limits are checked.
9594 * @param GCPtrMem The address of the guest memory.
9595 */
9596DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9597{
9598 /* The lazy approach for now... */
9599 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9600 if ( (GCPtrMem & 15) == 0
9601 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9602 {
9603 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9604 pu128Dst->au64[0] = pu128Src->au64[0];
9605 pu128Dst->au64[1] = pu128Src->au64[1];
9606 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9607 return;
9608 }
9609
9610 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9611 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9612}
9613#endif
9614
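/*
 * A minimal sketch of the alignment gate applied by the aligned SSE fetchers
 * above (illustration only, never compiled; the sketch* name is hypothetical):
 * a misaligned 16-byte access only raises #GP(0) when MXCSR.MM, AMD's
 * misaligned-exception mask, is clear; otherwise it is let through and the
 * usual segment and canonical checks still apply.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>

static bool sketchSseAccessRaisesGp0(uint64_t GCPtrMem, bool fMxcsrMm)
{
    return (GCPtrMem & 15) != 0     /* not aligned on a 16 byte boundary ... */
        && !fMxcsrMm;               /* ... and misalignment is not masked by MXCSR.MM */
}
#endif
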
9615
9616/**
9617 * Fetches a data oword (octo word), generally AVX related.
9618 *
9619 * @returns Strict VBox status code.
9620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9621  * @param   pu256Dst            Where to return the oword.
9622 * @param iSegReg The index of the segment register to use for
9623 * this access. The base and limits are checked.
9624 * @param GCPtrMem The address of the guest memory.
9625 */
9626IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9627{
9628 /* The lazy approach for now... */
9629 PCRTUINT256U pu256Src;
9630 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9631 if (rc == VINF_SUCCESS)
9632 {
9633 pu256Dst->au64[0] = pu256Src->au64[0];
9634 pu256Dst->au64[1] = pu256Src->au64[1];
9635 pu256Dst->au64[2] = pu256Src->au64[2];
9636 pu256Dst->au64[3] = pu256Src->au64[3];
9637 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9638 }
9639 return rc;
9640}
9641
9642
9643#ifdef IEM_WITH_SETJMP
9644/**
9645 * Fetches a data oword (octo word), generally AVX related.
9646 *
9647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9648  * @param   pu256Dst            Where to return the oword.
9649 * @param iSegReg The index of the segment register to use for
9650 * this access. The base and limits are checked.
9651 * @param GCPtrMem The address of the guest memory.
9652 */
9653IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9654{
9655 /* The lazy approach for now... */
9656 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9657 pu256Dst->au64[0] = pu256Src->au64[0];
9658 pu256Dst->au64[1] = pu256Src->au64[1];
9659 pu256Dst->au64[2] = pu256Src->au64[2];
9660 pu256Dst->au64[3] = pu256Src->au64[3];
9661 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9662}
9663#endif
9664
9665
9666/**
9667 * Fetches a data oword (octo word) at an aligned address, generally AVX
9668 * related.
9669 *
9670 * Raises \#GP(0) if not aligned.
9671 *
9672 * @returns Strict VBox status code.
9673 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9674  * @param   pu256Dst            Where to return the oword.
9675 * @param iSegReg The index of the segment register to use for
9676 * this access. The base and limits are checked.
9677 * @param GCPtrMem The address of the guest memory.
9678 */
9679IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9680{
9681 /* The lazy approach for now... */
9682 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9683 if (GCPtrMem & 31)
9684 return iemRaiseGeneralProtectionFault0(pVCpu);
9685
9686 PCRTUINT256U pu256Src;
9687 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9688 if (rc == VINF_SUCCESS)
9689 {
9690 pu256Dst->au64[0] = pu256Src->au64[0];
9691 pu256Dst->au64[1] = pu256Src->au64[1];
9692 pu256Dst->au64[2] = pu256Src->au64[2];
9693 pu256Dst->au64[3] = pu256Src->au64[3];
9694 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9695 }
9696 return rc;
9697}
9698
9699
9700#ifdef IEM_WITH_SETJMP
9701/**
9702 * Fetches a data oword (octo word) at an aligned address, generally AVX
9703 * related, longjmp on error.
9704 *
9705 * Raises \#GP(0) if not aligned.
9706 *
9707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9708  * @param   pu256Dst            Where to return the oword.
9709 * @param iSegReg The index of the segment register to use for
9710 * this access. The base and limits are checked.
9711 * @param GCPtrMem The address of the guest memory.
9712 */
9713DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9714{
9715 /* The lazy approach for now... */
9716 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9717 if ((GCPtrMem & 31) == 0)
9718 {
9719 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9720 pu256Dst->au64[0] = pu256Src->au64[0];
9721 pu256Dst->au64[1] = pu256Src->au64[1];
9722 pu256Dst->au64[2] = pu256Src->au64[2];
9723 pu256Dst->au64[3] = pu256Src->au64[3];
9724 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9725 return;
9726 }
9727
9728 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9729 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9730}
9731#endif
9732
9733
9734
9735/**
9736 * Fetches a descriptor register (lgdt, lidt).
9737 *
9738 * @returns Strict VBox status code.
9739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9740 * @param pcbLimit Where to return the limit.
9741 * @param pGCPtrBase Where to return the base.
9742 * @param iSegReg The index of the segment register to use for
9743 * this access. The base and limits are checked.
9744 * @param GCPtrMem The address of the guest memory.
9745 * @param enmOpSize The effective operand size.
9746 */
9747IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9748 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9749{
9750 /*
9751 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9752 * little special:
9753 * - The two reads are done separately.
9754      *    - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9755 * - We suspect the 386 to actually commit the limit before the base in
9756 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9757      *      don't try to emulate this eccentric behavior, because it's not well
9758 * enough understood and rather hard to trigger.
9759 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9760 */
9761 VBOXSTRICTRC rcStrict;
9762 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9763 {
9764 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9765 if (rcStrict == VINF_SUCCESS)
9766 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9767 }
9768 else
9769 {
9770         uint32_t uTmp = 0; /* (otherwise Visual C++ may warn about it being used uninitialized) */
9771 if (enmOpSize == IEMMODE_32BIT)
9772 {
9773 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9774 {
9775 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9776 if (rcStrict == VINF_SUCCESS)
9777 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9778 }
9779 else
9780 {
9781 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9782 if (rcStrict == VINF_SUCCESS)
9783 {
9784 *pcbLimit = (uint16_t)uTmp;
9785 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9786 }
9787 }
9788 if (rcStrict == VINF_SUCCESS)
9789 *pGCPtrBase = uTmp;
9790 }
9791 else
9792 {
9793 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9794 if (rcStrict == VINF_SUCCESS)
9795 {
9796 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9797 if (rcStrict == VINF_SUCCESS)
9798 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9799 }
9800 }
9801 }
9802 return rcStrict;
9803}
9804
9805
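/*
 * A minimal sketch of the base narrowing done by iemMemFetchDataXdtr above
 * for non-64-bit code (illustration only, never compiled; the sketch* name is
 * hypothetical): the memory operand is a 16-bit limit followed by a 32-bit
 * base, and with a 16-bit operand size only the low 24 bits of that base are
 * kept.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>

static uint64_t sketchXdtrBase(uint32_t uBase32, bool fOpSize32)
{
    /* 32-bit operand size: all 32 base bits are used; 16-bit: bits 0..23 only. */
    return fOpSize32 ? uBase32 : (uBase32 & UINT32_C(0x00ffffff));
}
#endif
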
9806
9807/**
9808 * Stores a data byte.
9809 *
9810 * @returns Strict VBox status code.
9811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9812 * @param iSegReg The index of the segment register to use for
9813 * this access. The base and limits are checked.
9814 * @param GCPtrMem The address of the guest memory.
9815 * @param u8Value The value to store.
9816 */
9817IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9818{
9819 /* The lazy approach for now... */
9820 uint8_t *pu8Dst;
9821 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9822 if (rc == VINF_SUCCESS)
9823 {
9824 *pu8Dst = u8Value;
9825 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9826 }
9827 return rc;
9828}
9829
9830
9831#ifdef IEM_WITH_SETJMP
9832/**
9833 * Stores a data byte, longjmp on error.
9834 *
9835 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9836 * @param iSegReg The index of the segment register to use for
9837 * this access. The base and limits are checked.
9838 * @param GCPtrMem The address of the guest memory.
9839 * @param u8Value The value to store.
9840 */
9841IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9842{
9843 /* The lazy approach for now... */
9844 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9845 *pu8Dst = u8Value;
9846 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9847}
9848#endif
9849
9850
9851/**
9852 * Stores a data word.
9853 *
9854 * @returns Strict VBox status code.
9855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9856 * @param iSegReg The index of the segment register to use for
9857 * this access. The base and limits are checked.
9858 * @param GCPtrMem The address of the guest memory.
9859 * @param u16Value The value to store.
9860 */
9861IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9862{
9863 /* The lazy approach for now... */
9864 uint16_t *pu16Dst;
9865 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9866 if (rc == VINF_SUCCESS)
9867 {
9868 *pu16Dst = u16Value;
9869 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9870 }
9871 return rc;
9872}
9873
9874
9875#ifdef IEM_WITH_SETJMP
9876/**
9877 * Stores a data word, longjmp on error.
9878 *
9879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9880 * @param iSegReg The index of the segment register to use for
9881 * this access. The base and limits are checked.
9882 * @param GCPtrMem The address of the guest memory.
9883 * @param u16Value The value to store.
9884 */
9885IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9886{
9887 /* The lazy approach for now... */
9888 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9889 *pu16Dst = u16Value;
9890 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9891}
9892#endif
9893
9894
9895/**
9896 * Stores a data dword.
9897 *
9898 * @returns Strict VBox status code.
9899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9900 * @param iSegReg The index of the segment register to use for
9901 * this access. The base and limits are checked.
9902 * @param GCPtrMem The address of the guest memory.
9903 * @param u32Value The value to store.
9904 */
9905IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9906{
9907 /* The lazy approach for now... */
9908 uint32_t *pu32Dst;
9909 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9910 if (rc == VINF_SUCCESS)
9911 {
9912 *pu32Dst = u32Value;
9913 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9914 }
9915 return rc;
9916}
9917
9918
9919#ifdef IEM_WITH_SETJMP
9920/**
9921  * Stores a data dword, longjmp on error.
9922  *
9924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9925 * @param iSegReg The index of the segment register to use for
9926 * this access. The base and limits are checked.
9927 * @param GCPtrMem The address of the guest memory.
9928 * @param u32Value The value to store.
9929 */
9930IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9931{
9932 /* The lazy approach for now... */
9933 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9934 *pu32Dst = u32Value;
9935 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9936}
9937#endif
9938
9939
9940/**
9941 * Stores a data qword.
9942 *
9943 * @returns Strict VBox status code.
9944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9945 * @param iSegReg The index of the segment register to use for
9946 * this access. The base and limits are checked.
9947 * @param GCPtrMem The address of the guest memory.
9948 * @param u64Value The value to store.
9949 */
9950IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9951{
9952 /* The lazy approach for now... */
9953 uint64_t *pu64Dst;
9954 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9955 if (rc == VINF_SUCCESS)
9956 {
9957 *pu64Dst = u64Value;
9958 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9959 }
9960 return rc;
9961}
9962
9963
9964#ifdef IEM_WITH_SETJMP
9965/**
9966 * Stores a data qword, longjmp on error.
9967 *
9968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9969 * @param iSegReg The index of the segment register to use for
9970 * this access. The base and limits are checked.
9971 * @param GCPtrMem The address of the guest memory.
9972 * @param u64Value The value to store.
9973 */
9974IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9975{
9976 /* The lazy approach for now... */
9977 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9978 *pu64Dst = u64Value;
9979 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9980}
9981#endif
9982
9983
9984/**
9985 * Stores a data dqword.
9986 *
9987 * @returns Strict VBox status code.
9988 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9989 * @param iSegReg The index of the segment register to use for
9990 * this access. The base and limits are checked.
9991 * @param GCPtrMem The address of the guest memory.
9992 * @param u128Value The value to store.
9993 */
9994IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9995{
9996 /* The lazy approach for now... */
9997 PRTUINT128U pu128Dst;
9998 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9999 if (rc == VINF_SUCCESS)
10000 {
10001 pu128Dst->au64[0] = u128Value.au64[0];
10002 pu128Dst->au64[1] = u128Value.au64[1];
10003 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10004 }
10005 return rc;
10006}
10007
10008
10009#ifdef IEM_WITH_SETJMP
10010/**
10011 * Stores a data dqword, longjmp on error.
10012 *
10013 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10014 * @param iSegReg The index of the segment register to use for
10015 * this access. The base and limits are checked.
10016 * @param GCPtrMem The address of the guest memory.
10017 * @param u128Value The value to store.
10018 */
10019IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10020{
10021 /* The lazy approach for now... */
10022 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10023 pu128Dst->au64[0] = u128Value.au64[0];
10024 pu128Dst->au64[1] = u128Value.au64[1];
10025 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10026}
10027#endif
10028
10029
10030/**
10031 * Stores a data dqword, SSE aligned.
10032 *
10033 * @returns Strict VBox status code.
10034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10035 * @param iSegReg The index of the segment register to use for
10036 * this access. The base and limits are checked.
10037 * @param GCPtrMem The address of the guest memory.
10038 * @param u128Value The value to store.
10039 */
10040IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10041{
10042 /* The lazy approach for now... */
10043 if ( (GCPtrMem & 15)
10044 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10045 return iemRaiseGeneralProtectionFault0(pVCpu);
10046
10047 PRTUINT128U pu128Dst;
10048 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10049 if (rc == VINF_SUCCESS)
10050 {
10051 pu128Dst->au64[0] = u128Value.au64[0];
10052 pu128Dst->au64[1] = u128Value.au64[1];
10053 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10054 }
10055 return rc;
10056}
10057
10058
10059#ifdef IEM_WITH_SETJMP
10060/**
10061 * Stores a data dqword, SSE aligned, longjmp on error.
10062 *
10064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10065 * @param iSegReg The index of the segment register to use for
10066 * this access. The base and limits are checked.
10067 * @param GCPtrMem The address of the guest memory.
10068 * @param u128Value The value to store.
10069 */
10070DECL_NO_INLINE(IEM_STATIC, void)
10071iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10072{
10073 /* The lazy approach for now... */
10074 if ( (GCPtrMem & 15) == 0
10075 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10076 {
10077 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10078 pu128Dst->au64[0] = u128Value.au64[0];
10079 pu128Dst->au64[1] = u128Value.au64[1];
10080 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10081 return;
10082 }
10083
10084 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10085 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10086}
10087#endif
10088
10089
10090/**
10091 * Stores a data oword (octo word).
10092 *
10093 * @returns Strict VBox status code.
10094 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10095 * @param iSegReg The index of the segment register to use for
10096 * this access. The base and limits are checked.
10097 * @param GCPtrMem The address of the guest memory.
10098 * @param pu256Value Pointer to the value to store.
10099 */
10100IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10101{
10102 /* The lazy approach for now... */
10103 PRTUINT256U pu256Dst;
10104 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10105 if (rc == VINF_SUCCESS)
10106 {
10107 pu256Dst->au64[0] = pu256Value->au64[0];
10108 pu256Dst->au64[1] = pu256Value->au64[1];
10109 pu256Dst->au64[2] = pu256Value->au64[2];
10110 pu256Dst->au64[3] = pu256Value->au64[3];
10111 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10112 }
10113 return rc;
10114}
10115
10116
10117#ifdef IEM_WITH_SETJMP
10118/**
10119 * Stores a data oword (octo word), longjmp on error.
10120 *
10121 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10122 * @param iSegReg The index of the segment register to use for
10123 * this access. The base and limits are checked.
10124 * @param GCPtrMem The address of the guest memory.
10125 * @param pu256Value Pointer to the value to store.
10126 */
10127IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10128{
10129 /* The lazy approach for now... */
10130 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10131 pu256Dst->au64[0] = pu256Value->au64[0];
10132 pu256Dst->au64[1] = pu256Value->au64[1];
10133 pu256Dst->au64[2] = pu256Value->au64[2];
10134 pu256Dst->au64[3] = pu256Value->au64[3];
10135 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10136}
10137#endif
10138
10139
10140/**
10141 * Stores a data oword (octo word), AVX aligned.
10142 *
10143 * @returns Strict VBox status code.
10144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10145 * @param iSegReg The index of the segment register to use for
10146 * this access. The base and limits are checked.
10147 * @param GCPtrMem The address of the guest memory.
10148 * @param pu256Value Pointer to the value to store.
10149 */
10150IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10151{
10152 /* The lazy approach for now... */
10153 if (GCPtrMem & 31)
10154 return iemRaiseGeneralProtectionFault0(pVCpu);
10155
10156 PRTUINT256U pu256Dst;
10157 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10158 if (rc == VINF_SUCCESS)
10159 {
10160 pu256Dst->au64[0] = pu256Value->au64[0];
10161 pu256Dst->au64[1] = pu256Value->au64[1];
10162 pu256Dst->au64[2] = pu256Value->au64[2];
10163 pu256Dst->au64[3] = pu256Value->au64[3];
10164 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10165 }
10166 return rc;
10167}
10168
10169
10170#ifdef IEM_WITH_SETJMP
10171/**
10172 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10173 *
10175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10176 * @param iSegReg The index of the segment register to use for
10177 * this access. The base and limits are checked.
10178 * @param GCPtrMem The address of the guest memory.
10179 * @param pu256Value Pointer to the value to store.
10180 */
10181DECL_NO_INLINE(IEM_STATIC, void)
10182iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10183{
10184 /* The lazy approach for now... */
10185 if ((GCPtrMem & 31) == 0)
10186 {
10187 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10188 pu256Dst->au64[0] = pu256Value->au64[0];
10189 pu256Dst->au64[1] = pu256Value->au64[1];
10190 pu256Dst->au64[2] = pu256Value->au64[2];
10191 pu256Dst->au64[3] = pu256Value->au64[3];
10192 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10193 return;
10194 }
10195
10196 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10197 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10198}
10199#endif
10200
10201
10202/**
10203 * Stores a descriptor register (sgdt, sidt).
10204 *
10205 * @returns Strict VBox status code.
10206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10207 * @param cbLimit The limit.
10208 * @param GCPtrBase The base address.
10209 * @param iSegReg The index of the segment register to use for
10210 * this access. The base and limits are checked.
10211 * @param GCPtrMem The address of the guest memory.
10212 */
10213IEM_STATIC VBOXSTRICTRC
10214iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10215{
10216 /*
10217  * The SIDT and SGDT instructions actually store the data using two
10218  * independent writes, and they do not respond to operand size prefixes.
10219 */
10220 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10221 if (rcStrict == VINF_SUCCESS)
10222 {
10223 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10224 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10225 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10226 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10227 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10228 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10229 else
10230 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10231 }
10232 return rcStrict;
10233}
10234
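/*
 * A minimal sketch of the base dword written by iemMemStoreDataXdtr above for
 * 16-bit code (illustration only, never compiled; the sketch* name is
 * hypothetical): on 286-class CPUs the unused top byte of the 6-byte operand
 * is stored as 0xFF, while later CPUs store the plain 32-bit base.
 */
#if 0
# include <stdbool.h>
# include <stdint.h>

static uint32_t sketchSgdtBaseDword(uint32_t uBase32, bool fIs286OrOlder)
{
    return fIs286OrOlder ? (uBase32 | UINT32_C(0xff000000)) : uBase32;
}
#endif
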
10235
10236/**
10237 * Pushes a word onto the stack.
10238 *
10239 * @returns Strict VBox status code.
10240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10241 * @param u16Value The value to push.
10242 */
10243IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10244{
10245     /* Decrement the stack pointer. */
10246 uint64_t uNewRsp;
10247 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10248
10249 /* Write the word the lazy way. */
10250 uint16_t *pu16Dst;
10251 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10252 if (rc == VINF_SUCCESS)
10253 {
10254 *pu16Dst = u16Value;
10255 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10256 }
10257
10258     /* Commit the new RSP value unless an access handler made trouble. */
10259 if (rc == VINF_SUCCESS)
10260 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10261
10262 return rc;
10263}
10264
10265
10266/**
10267 * Pushes a dword onto the stack.
10268 *
10269 * @returns Strict VBox status code.
10270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10271 * @param u32Value The value to push.
10272 */
10273IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10274{
10275     /* Decrement the stack pointer. */
10276 uint64_t uNewRsp;
10277 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10278
10279 /* Write the dword the lazy way. */
10280 uint32_t *pu32Dst;
10281 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10282 if (rc == VINF_SUCCESS)
10283 {
10284 *pu32Dst = u32Value;
10285 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10286 }
10287
10288     /* Commit the new RSP value unless an access handler made trouble. */
10289 if (rc == VINF_SUCCESS)
10290 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10291
10292 return rc;
10293}
10294
10295
10296/**
10297 * Pushes a dword segment register value onto the stack.
10298 *
10299 * @returns Strict VBox status code.
10300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10301 * @param u32Value The value to push.
10302 */
10303IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10304{
10305     /* Decrement the stack pointer. */
10306 uint64_t uNewRsp;
10307 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10308
10309     /* The Intel docs talk about zero extending the selector register
10310        value.  My actual Intel CPU here might be zero extending the value,
10311        but it still only writes the lower word... */
10312 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10313      * happens when crossing an electric page boundary: is the high word checked
10314 * for write accessibility or not? Probably it is. What about segment limits?
10315 * It appears this behavior is also shared with trap error codes.
10316 *
10317      * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
10318      * ancient hardware to see when it actually changed. */
10319 uint16_t *pu16Dst;
10320 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10321 if (rc == VINF_SUCCESS)
10322 {
10323 *pu16Dst = (uint16_t)u32Value;
10324 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10325 }
10326
10327     /* Commit the new RSP value unless an access handler made trouble. */
10328 if (rc == VINF_SUCCESS)
10329 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10330
10331 return rc;
10332}
10333
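/*
 * A minimal sketch of the behaviour modelled by iemMemStackPushU32SReg above
 * (illustration only, never compiled; the sketch* name is hypothetical): the
 * stack pointer moves by a full dword, but only the low word of the selector
 * value is written, leaving the upper two bytes of the stack slot untouched.
 */
#if 0
# include <stdint.h>
# include <string.h>

static uint32_t sketchPushSRegDword(uint8_t *pbStack, uint32_t uEsp, uint16_t uSel)
{
    uEsp -= 4;                                      /* dword-sized stack slot */
    memcpy(&pbStack[uEsp], &uSel, sizeof(uSel));    /* only two bytes are stored */
    return uEsp;                                    /* bytes at uEsp+2/+3 keep their old values */
}
#endif
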
10334
10335/**
10336 * Pushes a qword onto the stack.
10337 *
10338 * @returns Strict VBox status code.
10339 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10340 * @param u64Value The value to push.
10341 */
10342IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10343{
10344     /* Decrement the stack pointer. */
10345 uint64_t uNewRsp;
10346 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10347
10348     /* Write the qword the lazy way. */
10349 uint64_t *pu64Dst;
10350 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10351 if (rc == VINF_SUCCESS)
10352 {
10353 *pu64Dst = u64Value;
10354 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10355 }
10356
10357     /* Commit the new RSP value unless an access handler made trouble. */
10358 if (rc == VINF_SUCCESS)
10359 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10360
10361 return rc;
10362}
10363
10364
10365/**
10366 * Pops a word from the stack.
10367 *
10368 * @returns Strict VBox status code.
10369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10370 * @param pu16Value Where to store the popped value.
10371 */
10372IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10373{
10374 /* Increment the stack pointer. */
10375 uint64_t uNewRsp;
10376 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10377
10378     /* Read the word the lazy way. */
10379 uint16_t const *pu16Src;
10380 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10381 if (rc == VINF_SUCCESS)
10382 {
10383 *pu16Value = *pu16Src;
10384 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10385
10386 /* Commit the new RSP value. */
10387 if (rc == VINF_SUCCESS)
10388 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10389 }
10390
10391 return rc;
10392}
10393
10394
10395/**
10396 * Pops a dword from the stack.
10397 *
10398 * @returns Strict VBox status code.
10399 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10400 * @param pu32Value Where to store the popped value.
10401 */
10402IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10403{
10404 /* Increment the stack pointer. */
10405 uint64_t uNewRsp;
10406 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10407
10408     /* Read the dword the lazy way. */
10409 uint32_t const *pu32Src;
10410 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10411 if (rc == VINF_SUCCESS)
10412 {
10413 *pu32Value = *pu32Src;
10414 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10415
10416 /* Commit the new RSP value. */
10417 if (rc == VINF_SUCCESS)
10418 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10419 }
10420
10421 return rc;
10422}
10423
10424
10425/**
10426 * Pops a qword from the stack.
10427 *
10428 * @returns Strict VBox status code.
10429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10430 * @param pu64Value Where to store the popped value.
10431 */
10432IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10433{
10434 /* Increment the stack pointer. */
10435 uint64_t uNewRsp;
10436 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10437
10438     /* Read the qword the lazy way. */
10439 uint64_t const *pu64Src;
10440 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10441 if (rc == VINF_SUCCESS)
10442 {
10443 *pu64Value = *pu64Src;
10444 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10445
10446 /* Commit the new RSP value. */
10447 if (rc == VINF_SUCCESS)
10448 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10449 }
10450
10451 return rc;
10452}
10453
10454
10455/**
10456 * Pushes a word onto the stack, using a temporary stack pointer.
10457 *
10458 * @returns Strict VBox status code.
10459 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10460 * @param u16Value The value to push.
10461 * @param pTmpRsp Pointer to the temporary stack pointer.
10462 */
10463IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10464{
10465     /* Decrement the stack pointer. */
10466 RTUINT64U NewRsp = *pTmpRsp;
10467 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10468
10469 /* Write the word the lazy way. */
10470 uint16_t *pu16Dst;
10471 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10472 if (rc == VINF_SUCCESS)
10473 {
10474 *pu16Dst = u16Value;
10475 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10476 }
10477
10478     /* Commit the new RSP value unless an access handler made trouble. */
10479 if (rc == VINF_SUCCESS)
10480 *pTmpRsp = NewRsp;
10481
10482 return rc;
10483}
10484
10485
10486/**
10487 * Pushes a dword onto the stack, using a temporary stack pointer.
10488 *
10489 * @returns Strict VBox status code.
10490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10491 * @param u32Value The value to push.
10492 * @param pTmpRsp Pointer to the temporary stack pointer.
10493 */
10494IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10495{
10496     /* Decrement the stack pointer. */
10497 RTUINT64U NewRsp = *pTmpRsp;
10498 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10499
10500     /* Write the dword the lazy way. */
10501 uint32_t *pu32Dst;
10502 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10503 if (rc == VINF_SUCCESS)
10504 {
10505 *pu32Dst = u32Value;
10506 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10507 }
10508
10509     /* Commit the new RSP value unless an access handler made trouble. */
10510 if (rc == VINF_SUCCESS)
10511 *pTmpRsp = NewRsp;
10512
10513 return rc;
10514}
10515
10516
10517/**
10518 * Pushes a qword onto the stack, using a temporary stack pointer.
10519 *
10520 * @returns Strict VBox status code.
10521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10522 * @param u64Value The value to push.
10523 * @param pTmpRsp Pointer to the temporary stack pointer.
10524 */
10525IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10526{
10527     /* Decrement the stack pointer. */
10528 RTUINT64U NewRsp = *pTmpRsp;
10529 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10530
10531     /* Write the qword the lazy way. */
10532 uint64_t *pu64Dst;
10533 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10534 if (rc == VINF_SUCCESS)
10535 {
10536 *pu64Dst = u64Value;
10537 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10538 }
10539
10540     /* Commit the new RSP value unless an access handler made trouble. */
10541 if (rc == VINF_SUCCESS)
10542 *pTmpRsp = NewRsp;
10543
10544 return rc;
10545}
10546
10547
10548/**
10549 * Pops a word from the stack, using a temporary stack pointer.
10550 *
10551 * @returns Strict VBox status code.
10552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10553 * @param pu16Value Where to store the popped value.
10554 * @param pTmpRsp Pointer to the temporary stack pointer.
10555 */
10556IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10557{
10558 /* Increment the stack pointer. */
10559 RTUINT64U NewRsp = *pTmpRsp;
10560 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10561
10562     /* Read the word the lazy way. */
10563 uint16_t const *pu16Src;
10564 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10565 if (rc == VINF_SUCCESS)
10566 {
10567 *pu16Value = *pu16Src;
10568 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10569
10570 /* Commit the new RSP value. */
10571 if (rc == VINF_SUCCESS)
10572 *pTmpRsp = NewRsp;
10573 }
10574
10575 return rc;
10576}
10577
10578
10579/**
10580 * Pops a dword from the stack, using a temporary stack pointer.
10581 *
10582 * @returns Strict VBox status code.
10583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10584 * @param pu32Value Where to store the popped value.
10585 * @param pTmpRsp Pointer to the temporary stack pointer.
10586 */
10587IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10588{
10589 /* Increment the stack pointer. */
10590 RTUINT64U NewRsp = *pTmpRsp;
10591 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10592
10593     /* Read the dword the lazy way. */
10594 uint32_t const *pu32Src;
10595 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10596 if (rc == VINF_SUCCESS)
10597 {
10598 *pu32Value = *pu32Src;
10599 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10600
10601 /* Commit the new RSP value. */
10602 if (rc == VINF_SUCCESS)
10603 *pTmpRsp = NewRsp;
10604 }
10605
10606 return rc;
10607}
10608
10609
10610/**
10611 * Pops a qword from the stack, using a temporary stack pointer.
10612 *
10613 * @returns Strict VBox status code.
10614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10615 * @param pu64Value Where to store the popped value.
10616 * @param pTmpRsp Pointer to the temporary stack pointer.
10617 */
10618IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10619{
10620 /* Increment the stack pointer. */
10621 RTUINT64U NewRsp = *pTmpRsp;
10622 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10623
10624     /* Read the qword the lazy way. */
10625 uint64_t const *pu64Src;
10626 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10627 if (rcStrict == VINF_SUCCESS)
10628 {
10629 *pu64Value = *pu64Src;
10630 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10631
10632 /* Commit the new RSP value. */
10633 if (rcStrict == VINF_SUCCESS)
10634 *pTmpRsp = NewRsp;
10635 }
10636
10637 return rcStrict;
10638}
10639
10640
10641/**
10642 * Begin a special stack push (used by interrupts, exceptions and such).
10643 *
10644 * This will raise \#SS or \#PF if appropriate.
10645 *
10646 * @returns Strict VBox status code.
10647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10648 * @param cbMem The number of bytes to push onto the stack.
10649 * @param ppvMem Where to return the pointer to the stack memory.
10650 * As with the other memory functions this could be
10651 * direct access or bounce buffered access, so
10652 *                      don't commit the register until the commit call
10653 * succeeds.
10654 * @param puNewRsp Where to return the new RSP value. This must be
10655 * passed unchanged to
10656 * iemMemStackPushCommitSpecial().
10657 */
10658IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10659{
10660 Assert(cbMem < UINT8_MAX);
10661 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10662 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10663}
10664
10665
10666/**
10667 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10668 *
10669 * This will update the rSP.
10670 *
10671 * @returns Strict VBox status code.
10672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10673 * @param pvMem The pointer returned by
10674 * iemMemStackPushBeginSpecial().
10675 * @param uNewRsp The new RSP value returned by
10676 * iemMemStackPushBeginSpecial().
10677 */
10678IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10679{
10680 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10681 if (rcStrict == VINF_SUCCESS)
10682 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10683 return rcStrict;
10684}
10685
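/*
 * A minimal usage sketch for the special stack push protocol above
 * (illustration only, never compiled; the sketch* name and the two pushed
 * words are hypothetical): the stack memory is mapped first, then filled, and
 * only the commit call updates RSP, so a failing access handler cannot leave
 * the guest with a half-done push.
 */
#if 0
static VBOXSTRICTRC sketchPushTwoWords(PVMCPU pVCpu, uint16_t uFirst, uint16_t uSecond)
{
    void        *pvMem;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 4, &pvMem, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        ((uint16_t *)pvMem)[0] = uSecond;   /* lower address = pushed last */
        ((uint16_t *)pvMem)[1] = uFirst;
        rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvMem, uNewRsp); /* commits RSP on success */
    }
    return rcStrict;
}
#endif
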
10686
10687/**
10688 * Begin a special stack pop (used by iret, retf and such).
10689 *
10690 * This will raise \#SS or \#PF if appropriate.
10691 *
10692 * @returns Strict VBox status code.
10693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10694 * @param cbMem The number of bytes to pop from the stack.
10695 * @param ppvMem Where to return the pointer to the stack memory.
10696 * @param puNewRsp Where to return the new RSP value. This must be
10697 * assigned to CPUMCTX::rsp manually some time
10698 * after iemMemStackPopDoneSpecial() has been
10699 * called.
10700 */
10701IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10702{
10703 Assert(cbMem < UINT8_MAX);
10704 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10705 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10706}
10707
10708
10709/**
10710 * Continue a special stack pop (used by iret and retf).
10711 *
10712 * This will raise \#SS or \#PF if appropriate.
10713 *
10714 * @returns Strict VBox status code.
10715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10716 * @param cbMem The number of bytes to pop from the stack.
10717 * @param ppvMem Where to return the pointer to the stack memory.
10718 * @param puNewRsp Where to return the new RSP value. This must be
10719 * assigned to CPUMCTX::rsp manually some time
10720 * after iemMemStackPopDoneSpecial() has been
10721 * called.
10722 */
10723IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10724{
10725 Assert(cbMem < UINT8_MAX);
10726 RTUINT64U NewRsp;
10727 NewRsp.u = *puNewRsp;
10728 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10729 *puNewRsp = NewRsp.u;
10730 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10731}
10732
10733
10734/**
10735 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10736 * iemMemStackPopContinueSpecial).
10737 *
10738 * The caller will manually commit the rSP.
10739 *
10740 * @returns Strict VBox status code.
10741 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10742 * @param pvMem The pointer returned by
10743 * iemMemStackPopBeginSpecial() or
10744 * iemMemStackPopContinueSpecial().
10745 */
10746IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10747{
10748 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10749}
10750
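/*
 * A minimal usage sketch for the special stack pop protocol above
 * (illustration only, never compiled; the sketch* name is hypothetical): the
 * stack memory is read between the begin and done calls, and the returned RSP
 * value is only assigned to the guest context once everything that might
 * still fault has succeeded.
 */
#if 0
static VBOXSTRICTRC sketchPopDword(PVMCPU pVCpu, uint32_t *pu32Value)
{
    void const  *pvMem;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 4, &pvMem, &uNewRsp);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu32Value = *(uint32_t const *)pvMem;
        rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvMem);
        if (rcStrict == VINF_SUCCESS)
            pVCpu->cpum.GstCtx.rsp = uNewRsp;   /* the caller commits rSP manually */
    }
    return rcStrict;
}
#endif
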
10751
10752/**
10753 * Fetches a system table byte.
10754 *
10755 * @returns Strict VBox status code.
10756 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10757 * @param pbDst Where to return the byte.
10758 * @param iSegReg The index of the segment register to use for
10759 * this access. The base and limits are checked.
10760 * @param GCPtrMem The address of the guest memory.
10761 */
10762IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10763{
10764 /* The lazy approach for now... */
10765 uint8_t const *pbSrc;
10766 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10767 if (rc == VINF_SUCCESS)
10768 {
10769 *pbDst = *pbSrc;
10770 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10771 }
10772 return rc;
10773}
10774
10775
10776/**
10777 * Fetches a system table word.
10778 *
10779 * @returns Strict VBox status code.
10780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10781 * @param pu16Dst Where to return the word.
10782 * @param iSegReg The index of the segment register to use for
10783 * this access. The base and limits are checked.
10784 * @param GCPtrMem The address of the guest memory.
10785 */
10786IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10787{
10788 /* The lazy approach for now... */
10789 uint16_t const *pu16Src;
10790 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10791 if (rc == VINF_SUCCESS)
10792 {
10793 *pu16Dst = *pu16Src;
10794 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10795 }
10796 return rc;
10797}
10798
10799
10800/**
10801 * Fetches a system table dword.
10802 *
10803 * @returns Strict VBox status code.
10804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10805 * @param pu32Dst Where to return the dword.
10806 * @param iSegReg The index of the segment register to use for
10807 * this access. The base and limits are checked.
10808 * @param GCPtrMem The address of the guest memory.
10809 */
10810IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10811{
10812 /* The lazy approach for now... */
10813 uint32_t const *pu32Src;
10814 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10815 if (rc == VINF_SUCCESS)
10816 {
10817 *pu32Dst = *pu32Src;
10818 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10819 }
10820 return rc;
10821}
10822
10823
10824/**
10825 * Fetches a system table qword.
10826 *
10827 * @returns Strict VBox status code.
10828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10829 * @param pu64Dst Where to return the qword.
10830 * @param iSegReg The index of the segment register to use for
10831 * this access. The base and limits are checked.
10832 * @param GCPtrMem The address of the guest memory.
10833 */
10834IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10835{
10836 /* The lazy approach for now... */
10837 uint64_t const *pu64Src;
10838 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10839 if (rc == VINF_SUCCESS)
10840 {
10841 *pu64Dst = *pu64Src;
10842 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10843 }
10844 return rc;
10845}
10846
10847
10848/**
10849 * Fetches a descriptor table entry with caller specified error code.
10850 *
10851 * @returns Strict VBox status code.
10852 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10853 * @param pDesc Where to return the descriptor table entry.
10854 * @param uSel The selector which table entry to fetch.
10855 * @param uXcpt The exception to raise on table lookup error.
10856 * @param uErrorCode The error code associated with the exception.
10857 */
10858IEM_STATIC VBOXSTRICTRC
10859iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10860{
10861 AssertPtr(pDesc);
10862 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10863
10864 /** @todo did the 286 require all 8 bytes to be accessible? */
10865 /*
10866 * Get the selector table base and check bounds.
10867 */
10868 RTGCPTR GCPtrBase;
10869 if (uSel & X86_SEL_LDT)
10870 {
10871 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10872 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10873 {
10874 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10875 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10876 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10877 uErrorCode, 0);
10878 }
10879
10880 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10881 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10882 }
10883 else
10884 {
10885 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10886 {
10887 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10888 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10889 uErrorCode, 0);
10890 }
10891 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10892 }
10893
10894 /*
10895 * Read the legacy descriptor and maybe the long mode extensions if
10896 * required.
10897 */
10898 VBOXSTRICTRC rcStrict;
10899 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10900 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10901 else
10902 {
10903 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10904 if (rcStrict == VINF_SUCCESS)
10905 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10906 if (rcStrict == VINF_SUCCESS)
10907 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10908 if (rcStrict == VINF_SUCCESS)
10909 pDesc->Legacy.au16[3] = 0;
10910 else
10911 return rcStrict;
10912 }
10913
10914 if (rcStrict == VINF_SUCCESS)
10915 {
10916 if ( !IEM_IS_LONG_MODE(pVCpu)
10917 || pDesc->Legacy.Gen.u1DescType)
10918 pDesc->Long.au64[1] = 0;
10919 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10920 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10921 else
10922 {
10923 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10924 /** @todo is this the right exception? */
10925 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10926 }
10927 }
10928 return rcStrict;
10929}
10930
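/*
 * Worked example of the selector math used above (illustration only, not
 * additional IEM code).  A selector is index[15:3] | TI[2] | RPL[1:0], so
 * masking with X86_SEL_MASK (0xfff8) gives the byte offset of the 8-byte
 * descriptor, while OR'ing with X86_SEL_RPL_LDT (7) gives the offset of its
 * last byte for the limit check:
 *
 *     uint16_t const uSel      = 0x002b;                 // index 5, TI=1 (LDT), RPL=3
 *     uint32_t const offDesc   = uSel & X86_SEL_MASK;    // 0x0028 = 5 * 8
 *     uint32_t const offLast   = uSel | X86_SEL_RPL_LDT; // 0x002f, must be <= ldtr.u32Limit
 *     RTGCPTR  const GCPtrDesc = pVCpu->cpum.GstCtx.ldtr.u64Base + offDesc;
 */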
10931
10932/**
10933 * Fetches a descriptor table entry.
10934 *
10935 * @returns Strict VBox status code.
10936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10937 * @param pDesc Where to return the descriptor table entry.
10938 * @param uSel The selector which table entry to fetch.
10939 * @param uXcpt The exception to raise on table lookup error.
10940 */
10941IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10942{
10943 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10944}
10945
10946
10947/**
10948 * Fakes a long mode stack selector for SS = 0.
10949 *
10950 * @param pDescSs Where to return the fake stack descriptor.
10951 * @param uDpl The DPL we want.
10952 */
10953IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10954{
10955 pDescSs->Long.au64[0] = 0;
10956 pDescSs->Long.au64[1] = 0;
10957 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10958 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10959 pDescSs->Long.Gen.u2Dpl = uDpl;
10960 pDescSs->Long.Gen.u1Present = 1;
10961 pDescSs->Long.Gen.u1Long = 1;
10962}
10963
10964
10965/**
10966 * Marks the selector descriptor as accessed (only non-system descriptors).
10967 *
 10968 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10969 * will therefore skip the limit checks.
10970 *
10971 * @returns Strict VBox status code.
10972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10973 * @param uSel The selector.
10974 */
10975IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10976{
10977 /*
10978 * Get the selector table base and calculate the entry address.
10979 */
10980 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10981 ? pVCpu->cpum.GstCtx.ldtr.u64Base
10982 : pVCpu->cpum.GstCtx.gdtr.pGdt;
10983 GCPtr += uSel & X86_SEL_MASK;
10984
10985 /*
10986 * ASMAtomicBitSet will assert if the address is misaligned, so do some
 10987 * ugly stuff to avoid this. This ensures the access is atomic and
 10988 * more or less removes any question about 8-bit vs 32-bit accesses.
10989 */
10990 VBOXSTRICTRC rcStrict;
10991 uint32_t volatile *pu32;
10992 if ((GCPtr & 3) == 0)
10993 {
 10994 /* The normal case: map the dword containing the accessed bit (bit 40). */
10995 GCPtr += 2 + 2;
10996 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10997 if (rcStrict != VINF_SUCCESS)
10998 return rcStrict;
 10999 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11000 }
11001 else
11002 {
11003 /* The misaligned GDT/LDT case, map the whole thing. */
11004 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11005 if (rcStrict != VINF_SUCCESS)
11006 return rcStrict;
11007 switch ((uintptr_t)pu32 & 3)
11008 {
11009 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11010 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11011 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11012 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11013 }
11014 }
11015
11016 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11017}
11018
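/*
 * Bit arithmetic recap for iemMemMarkSelDescAccessed (illustration only):
 * the type field lives in byte 5 of the descriptor, so its accessed bit
 * (X86_SEL_TYPE_ACCESSED, value 1) is bit 40 of the 8-byte entry.
 *
 *     unsigned const iBit         = 40;        // accessed bit within the descriptor
 *     unsigned const offByte      = iBit / 8;  // = 5, the type byte
 *     unsigned const iBitInDword4 = iBit - 32; // = 8, matching ASMAtomicBitSet(pu32, 8) above
 *
 * In the misaligned case 8 bits are subtracted for every byte the mapping
 * pointer is advanced, which is what the 40 - 24 / - 16 / - 8 cases encode.
 */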
11019/** @} */
11020
11021
11022/*
11023 * Include the C/C++ implementation of instruction.
11024 */
11025#include "IEMAllCImpl.cpp.h"
11026
11027
11028
11029/** @name "Microcode" macros.
11030 *
 11031 * The idea is that we should be able to use the same code both to interpret
 11032 * instructions and to feed a future recompiler. Thus this obfuscation.
11033 *
11034 * @{
11035 */
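/*
 * Rough illustration of how these IEM_MC_XXX macros are meant to be strung
 * together by the instruction decoders (the real bodies live in the
 * instruction implementation includes further down; the register constants
 * here are just picked for the example):
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *     IEM_MC_STORE_GREG_U16(X86_GREG_xDX, u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *
 * In the interpreter each line expands to plain C operating on pVCpu; a
 * recompiler could expand the very same lines into code generation instead.
 */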
11036#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11037#define IEM_MC_END() }
11038#define IEM_MC_PAUSE() do {} while (0)
11039#define IEM_MC_CONTINUE() do {} while (0)
11040
11041/** Internal macro. */
11042#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11043 do \
11044 { \
11045 VBOXSTRICTRC rcStrict2 = a_Expr; \
11046 if (rcStrict2 != VINF_SUCCESS) \
11047 return rcStrict2; \
11048 } while (0)
11049
11050
11051#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11052#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11053#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11054#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11055#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11056#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11057#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11058#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11059#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11060 do { \
11061 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11062 return iemRaiseDeviceNotAvailable(pVCpu); \
11063 } while (0)
11064#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11065 do { \
11066 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11067 return iemRaiseDeviceNotAvailable(pVCpu); \
11068 } while (0)
11069#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11070 do { \
11071 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11072 return iemRaiseMathFault(pVCpu); \
11073 } while (0)
11074#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11075 do { \
11076 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11077 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11078 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11079 return iemRaiseUndefinedOpcode(pVCpu); \
11080 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11081 return iemRaiseDeviceNotAvailable(pVCpu); \
11082 } while (0)
11083#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11084 do { \
11085 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11086 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11087 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11088 return iemRaiseUndefinedOpcode(pVCpu); \
11089 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11090 return iemRaiseDeviceNotAvailable(pVCpu); \
11091 } while (0)
11092#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11093 do { \
11094 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11095 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11096 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11097 return iemRaiseUndefinedOpcode(pVCpu); \
11098 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11099 return iemRaiseDeviceNotAvailable(pVCpu); \
11100 } while (0)
11101#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11102 do { \
11103 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11104 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11105 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11106 return iemRaiseUndefinedOpcode(pVCpu); \
11107 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11108 return iemRaiseDeviceNotAvailable(pVCpu); \
11109 } while (0)
11110#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11111 do { \
11112 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11113 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11114 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11115 return iemRaiseUndefinedOpcode(pVCpu); \
11116 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11117 return iemRaiseDeviceNotAvailable(pVCpu); \
11118 } while (0)
11119#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11120 do { \
11121 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11122 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11123 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11124 return iemRaiseUndefinedOpcode(pVCpu); \
11125 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11126 return iemRaiseDeviceNotAvailable(pVCpu); \
11127 } while (0)
11128#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11129 do { \
11130 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11131 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11132 return iemRaiseUndefinedOpcode(pVCpu); \
11133 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11134 return iemRaiseDeviceNotAvailable(pVCpu); \
11135 } while (0)
11136#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11137 do { \
11138 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11139 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11140 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11141 return iemRaiseUndefinedOpcode(pVCpu); \
11142 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11143 return iemRaiseDeviceNotAvailable(pVCpu); \
11144 } while (0)
11145#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11146 do { \
11147 if (pVCpu->iem.s.uCpl != 0) \
11148 return iemRaiseGeneralProtectionFault0(pVCpu); \
11149 } while (0)
11150#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11151 do { \
11152 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11153 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11154 } while (0)
11155#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11156 do { \
11157 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11158 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11159 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11160 return iemRaiseUndefinedOpcode(pVCpu); \
11161 } while (0)
11162#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11163 do { \
11164 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11165 return iemRaiseGeneralProtectionFault0(pVCpu); \
11166 } while (0)
11167
11168
11169#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11170#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11171#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11172#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11173#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11174#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11175#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11176 uint32_t a_Name; \
11177 uint32_t *a_pName = &a_Name
11178#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11179 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11180
11181#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11182#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11183
11184#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11185#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11186#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11187#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11188#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11189#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11190#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11191#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11192#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11193#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11194#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11195#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11196#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11197#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11198#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11199#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11200#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11201#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11202 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11203 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11204 } while (0)
11205#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11206 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11207 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11208 } while (0)
11209#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11210 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11211 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11212 } while (0)
11213/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11214#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11215 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11216 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11217 } while (0)
11218#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11219 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11220 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11221 } while (0)
11222/** @note Not for IOPL or IF testing or modification. */
11223#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11224#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11225#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11226#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11227
11228#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11229#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11230#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11231#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11232#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11233#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11234#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11235#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11236#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11237#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11238/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11239#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11240 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11241 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11242 } while (0)
11243#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11244 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11245 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11246 } while (0)
11247#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11248 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11249
11250
11251#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11252#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11253/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11254 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11255#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11256#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11257/** @note Not for IOPL or IF testing or modification. */
11258#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11259
11260#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11261#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11262#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11263 do { \
11264 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11265 *pu32Reg += (a_u32Value); \
 11266 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11267 } while (0)
11268#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11269
11270#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11271#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11272#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11273 do { \
11274 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11275 *pu32Reg -= (a_u32Value); \
 11276 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11277 } while (0)
11278#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11279#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11280
11281#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11282#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11283#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11284#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11285#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11286#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11287#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11288
11289#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11290#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11291#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11292#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11293
11294#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11295#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11296#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11297
11298#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11299#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11300#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11301
11302#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11303#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11304#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11305
11306#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11307#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11308#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11309
11310#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11311
11312#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11313
11314#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11315#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11316#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11317 do { \
11318 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11319 *pu32Reg &= (a_u32Value); \
 11320 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11321 } while (0)
11322#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11323
11324#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11325#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11326#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11327 do { \
11328 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11329 *pu32Reg |= (a_u32Value); \
 11330 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11331 } while (0)
11332#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11333
11334
11335/** @note Not for IOPL or IF modification. */
11336#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11337/** @note Not for IOPL or IF modification. */
11338#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11339/** @note Not for IOPL or IF modification. */
11340#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11341
11342#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11343
 11344/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0, i.e. abridged FTW=0xff) if necessary. */
11345#define IEM_MC_FPU_TO_MMX_MODE() do { \
11346 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11347 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11348 } while (0)
11349
 11350/** Switches the FPU state from MMX mode (FTW=0xffff, i.e. abridged FTW=0). */
11351#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11352 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11353 } while (0)
11354
11355#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11356 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11357#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11358 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11359#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11360 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11361 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11362 } while (0)
11363#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11364 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11365 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11366 } while (0)
11367#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11368 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11369#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11370 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11371#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11372 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11373
11374#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11375 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11376 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11377 } while (0)
11378#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11379 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11380#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11381 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11382#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11383 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11384#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11385 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11386 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11387 } while (0)
11388#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11389 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11390#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11391 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11392 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11393 } while (0)
11394#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11395 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11396#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11397 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11398 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11399 } while (0)
11400#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11401 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11402#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11403 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11404#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11405 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11406#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11407 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11408#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11409 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11410 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11411 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11412 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11413 } while (0)
11414
11415#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11416 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11417 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11418 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11419 } while (0)
11420#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11421 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11422 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11423 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11424 } while (0)
11425#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11426 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11427 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11428 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11429 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11430 } while (0)
11431#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11432 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11433 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11434 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11435 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11436 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11437 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11438 } while (0)
11439
11440#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11441#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11442 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11443 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11444 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11445 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11446 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11447 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11448 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11449 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11450 } while (0)
11451#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11452 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11453 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11454 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11455 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11456 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11457 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11458 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11459 } while (0)
11460#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11461 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11462 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11463 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11464 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11465 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11466 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11467 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11468 } while (0)
11469#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11470 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11471 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11472 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11473 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11474 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11475 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11476 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11477 } while (0)
11478
11479#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11480 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11481#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11482 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11483#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11484 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11485#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11486 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11487 uintptr_t const iYRegTmp = (a_iYReg); \
11488 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11489 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11490 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11491 } while (0)
11492
11493#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11494 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11495 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11496 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11497 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11498 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11499 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11500 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11501 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11502 } while (0)
11503#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11504 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11505 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11506 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11507 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11508 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11509 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11510 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11511 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11512 } while (0)
11513#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11514 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11515 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11516 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11517 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11518 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11519 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11520 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11521 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11522 } while (0)
11523
11524#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11525 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11526 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11527 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11528 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11529 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11530 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11531 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11532 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11533 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11534 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11535 } while (0)
11536#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11537 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11538 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11539 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11540 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11541 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11542 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11543 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11544 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11545 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11546 } while (0)
11547#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11548 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11549 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11550 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11551 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11552 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11553 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11554 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11555 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11556 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11557 } while (0)
11558#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11559 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11560 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11561 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11562 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11563 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11564 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11565 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11566 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11567 } while (0)
11568
11569#ifndef IEM_WITH_SETJMP
11570# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11571 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11572# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11573 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11574# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11575 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11576#else
11577# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11578 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11579# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11580 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11581# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11582 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11583#endif
11584
11585#ifndef IEM_WITH_SETJMP
11586# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11587 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11588# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11589 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11590# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11591 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11592#else
11593# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11594 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11595# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11596 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11597# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11598 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11599#endif
11600
11601#ifndef IEM_WITH_SETJMP
11602# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11603 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11604# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11605 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11606# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11607 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11608#else
11609# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11610 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11611# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11612 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11613# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11614 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11615#endif
11616
11617#ifdef SOME_UNUSED_FUNCTION
11618# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11619 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11620#endif
11621
11622#ifndef IEM_WITH_SETJMP
11623# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11624 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11625# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11626 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11627# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11628 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11629# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11630 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11631#else
11632# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11633 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11634# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11635 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11636# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11637 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11638# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11639 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11640#endif
11641
11642#ifndef IEM_WITH_SETJMP
11643# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11644 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11645# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11646 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11647# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11648 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11649#else
11650# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11651 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11652# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11653 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11654# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11655 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11656#endif
11657
11658#ifndef IEM_WITH_SETJMP
11659# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11660 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11661# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11662 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11663#else
11664# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11665 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11666# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11667 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11668#endif
11669
11670#ifndef IEM_WITH_SETJMP
11671# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11672 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11673# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11674 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11675#else
11676# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11677 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11678# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11679 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11680#endif
11681
11682
11683
11684#ifndef IEM_WITH_SETJMP
11685# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11686 do { \
11687 uint8_t u8Tmp; \
11688 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11689 (a_u16Dst) = u8Tmp; \
11690 } while (0)
11691# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11692 do { \
11693 uint8_t u8Tmp; \
11694 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11695 (a_u32Dst) = u8Tmp; \
11696 } while (0)
11697# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11698 do { \
11699 uint8_t u8Tmp; \
11700 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11701 (a_u64Dst) = u8Tmp; \
11702 } while (0)
11703# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11704 do { \
11705 uint16_t u16Tmp; \
11706 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11707 (a_u32Dst) = u16Tmp; \
11708 } while (0)
11709# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11710 do { \
11711 uint16_t u16Tmp; \
11712 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11713 (a_u64Dst) = u16Tmp; \
11714 } while (0)
11715# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11716 do { \
11717 uint32_t u32Tmp; \
11718 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11719 (a_u64Dst) = u32Tmp; \
11720 } while (0)
11721#else /* IEM_WITH_SETJMP */
11722# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11723 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11724# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11725 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11726# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11727 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11728# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11729 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11730# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11731 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11732# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11733 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11734#endif /* IEM_WITH_SETJMP */
11735
11736#ifndef IEM_WITH_SETJMP
11737# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11738 do { \
11739 uint8_t u8Tmp; \
11740 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11741 (a_u16Dst) = (int8_t)u8Tmp; \
11742 } while (0)
11743# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11744 do { \
11745 uint8_t u8Tmp; \
11746 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11747 (a_u32Dst) = (int8_t)u8Tmp; \
11748 } while (0)
11749# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11750 do { \
11751 uint8_t u8Tmp; \
11752 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11753 (a_u64Dst) = (int8_t)u8Tmp; \
11754 } while (0)
11755# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11756 do { \
11757 uint16_t u16Tmp; \
11758 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11759 (a_u32Dst) = (int16_t)u16Tmp; \
11760 } while (0)
11761# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11762 do { \
11763 uint16_t u16Tmp; \
11764 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11765 (a_u64Dst) = (int16_t)u16Tmp; \
11766 } while (0)
11767# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11768 do { \
11769 uint32_t u32Tmp; \
11770 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11771 (a_u64Dst) = (int32_t)u32Tmp; \
11772 } while (0)
11773#else /* IEM_WITH_SETJMP */
11774# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11775 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11776# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11777 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11778# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11779 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11780# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11781 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11782# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11783 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11784# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11785 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11786#endif /* IEM_WITH_SETJMP */
11787
11788#ifndef IEM_WITH_SETJMP
11789# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11790 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11791# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11792 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11793# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11794 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11795# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11796 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11797#else
11798# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11799 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11800# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11801 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11802# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11803 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11804# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11805 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11806#endif
11807
11808#ifndef IEM_WITH_SETJMP
11809# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11810 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11811# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11812 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11813# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11814 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11815# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11816 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11817#else
11818# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11819 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11820# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11821 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11822# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11823 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11824# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11825 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11826#endif
11827
11828#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11829#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11830#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11831#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11832#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11833#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11834#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11835 do { \
11836 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11837 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11838 } while (0)
11839
11840#ifndef IEM_WITH_SETJMP
11841# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11842 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11843# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11844 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11845#else
11846# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11847 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11848# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11849 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11850#endif
11851
11852#ifndef IEM_WITH_SETJMP
11853# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11854 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11855# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11856 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11857#else
11858# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11859 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11860# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11861 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11862#endif
11863
11864
11865#define IEM_MC_PUSH_U16(a_u16Value) \
11866 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11867#define IEM_MC_PUSH_U32(a_u32Value) \
11868 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11869#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11870 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11871#define IEM_MC_PUSH_U64(a_u64Value) \
11872 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11873
11874#define IEM_MC_POP_U16(a_pu16Value) \
11875 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11876#define IEM_MC_POP_U32(a_pu32Value) \
11877 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11878#define IEM_MC_POP_U64(a_pu64Value) \
11879 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11880
11881/** Maps guest memory for direct or bounce buffered access.
11882 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11883 * @remarks May return.
11884 */
11885#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11886 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11887
11888/** Maps guest memory for direct or bounce buffered access.
11889 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11890 * @remarks May return.
11891 */
11892#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11893 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11894
11895/** Commits the memory and unmaps the guest memory.
11896 * @remarks May return.
11897 */
11898#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11899 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11900
11901/** Commits the memory and unmaps the guest memory unless the FPU status word
11902 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11903 * would cause FLD not to store.
11904 *
11905 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11906 * store, while \#P will not.
11907 *
11908 * @remarks May in theory return - for now.
11909 */
11910#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11911 do { \
11912 if ( !(a_u16FSW & X86_FSW_ES) \
11913 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11914 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11915 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11916 } while (0)
11917
11918/** Calculate effective address from R/M. */
11919#ifndef IEM_WITH_SETJMP
11920# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11921 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11922#else
11923# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11924 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11925#endif
11926
11927#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11928#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11929#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11930#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11931#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11932#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11933#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11934
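/*
 * Illustrative sketch, not part of the original sources: a typical
 * read-modify-write memory operand maps the guest memory, hands the pointer to
 * an assembly worker and then commits the result.  The decoder name is
 * hypothetical; IEM_MC_BEGIN/IEM_MC_END, IEM_MC_ARG*, IEM_MC_LOCAL,
 * IEM_MC_FETCH_GREG_U32, IEM_MC_COMMIT_EFLAGS, IEM_MC_ADVANCE_RIP and
 * iemAImpl_add_u32 are assumed to be the definitions found elsewhere in IEM,
 * and IEM_MC_CALC_RM_EFF_ADDR / IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX are
 * defined further down in this file.
 */
#if 0 /* example only */
FNIEMOP_DEF_1(iemOp_example_add_Ev_Gv, uint8_t, bRm)
{
    IEM_MC_BEGIN(3, 2);
    IEM_MC_ARG(uint32_t *,          pu32Dst,            0);
    IEM_MC_ARG(uint32_t,            u32Src,             1);
    IEM_MC_ARG_LOCAL_EFLAGS(        pEFlags, EFlags,    2);
    IEM_MC_LOCAL(RTGCPTR,           GCPtrEffDst);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
    IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
    IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
    IEM_MC_COMMIT_EFLAGS(EFlags);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif
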
11935/**
11936 * Defers the rest of the instruction emulation to a C implementation routine
11937 * and returns, only taking the standard parameters.
11938 *
11939 * @param a_pfnCImpl The pointer to the C routine.
11940 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11941 */
11942#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11943
11944/**
11945 * Defers the rest of instruction emulation to a C implementation routine and
11946 * returns, taking one argument in addition to the standard ones.
11947 *
11948 * @param a_pfnCImpl The pointer to the C routine.
11949 * @param a0 The argument.
11950 */
11951#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11952
11953/**
11954 * Defers the rest of the instruction emulation to a C implementation routine
11955 * and returns, taking two arguments in addition to the standard ones.
11956 *
11957 * @param a_pfnCImpl The pointer to the C routine.
11958 * @param a0 The first extra argument.
11959 * @param a1 The second extra argument.
11960 */
11961#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11962
11963/**
11964 * Defers the rest of the instruction emulation to a C implementation routine
11965 * and returns, taking three arguments in addition to the standard ones.
11966 *
11967 * @param a_pfnCImpl The pointer to the C routine.
11968 * @param a0 The first extra argument.
11969 * @param a1 The second extra argument.
11970 * @param a2 The third extra argument.
11971 */
11972#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11973
11974/**
11975 * Defers the rest of the instruction emulation to a C implementation routine
11976 * and returns, taking four arguments in addition to the standard ones.
11977 *
11978 * @param a_pfnCImpl The pointer to the C routine.
11979 * @param a0 The first extra argument.
11980 * @param a1 The second extra argument.
11981 * @param a2 The third extra argument.
11982 * @param a3 The fourth extra argument.
11983 */
11984#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11985
11986/**
11987 * Defers the rest of the instruction emulation to a C implementation routine
11988 * and returns, taking five arguments in addition to the standard ones.
11989 *
11990 * @param a_pfnCImpl The pointer to the C routine.
11991 * @param a0 The first extra argument.
11992 * @param a1 The second extra argument.
11993 * @param a2 The third extra argument.
11994 * @param a3 The fourth extra argument.
11995 * @param a4 The fifth extra argument.
11996 */
11997#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11998
11999/**
12000 * Defers the entire instruction emulation to a C implementation routine and
12001 * returns, only taking the standard parameters.
12002 *
12003 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12004 *
12005 * @param a_pfnCImpl The pointer to the C routine.
12006 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12007 */
12008#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12009
12010/**
12011 * Defers the entire instruction emulation to a C implementation routine and
12012 * returns, taking one argument in addition to the standard ones.
12013 *
12014 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12015 *
12016 * @param a_pfnCImpl The pointer to the C routine.
12017 * @param a0 The argument.
12018 */
12019#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12020
12021/**
12022 * Defers the entire instruction emulation to a C implementation routine and
12023 * returns, taking two arguments in addition to the standard ones.
12024 *
12025 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12026 *
12027 * @param a_pfnCImpl The pointer to the C routine.
12028 * @param a0 The first extra argument.
12029 * @param a1 The second extra argument.
12030 */
12031#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12032
12033/**
12034 * Defers the entire instruction emulation to a C implementation routine and
12035 * returns, taking three arguments in addition to the standard ones.
12036 *
12037 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12038 *
12039 * @param a_pfnCImpl The pointer to the C routine.
12040 * @param a0 The first extra argument.
12041 * @param a1 The second extra argument.
12042 * @param a2 The third extra argument.
12043 */
12044#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12045
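/*
 * Illustrative sketch, hypothetical names: an instruction that is too involved
 * for the MC blocks defers everything to a C worker.  IEM_CIMPL_DEF_1,
 * FNIEMOP_DEF and IEM_OPCODE_GET_NEXT_U8 are assumed to be the helpers defined
 * elsewhere in IEM; the worker receives pVCpu and the instruction length as
 * its standard parameters plus the extra argument passed here.
 */
#if 0 /* example only */
IEM_CIMPL_DEF_1(iemCImpl_example, uint8_t, u8Imm)
{
    /* ...emulate the instruction, advance RIP, return a strict status code... */
    RT_NOREF(u8Imm);
    return VINF_SUCCESS;
}

FNIEMOP_DEF(iemOp_example_Ib)
{
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    /* No IEM_MC_BEGIN/IEM_MC_END block around this one. */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_example, u8Imm);
}
#endif
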
12046/**
12047 * Calls a FPU assembly implementation taking one visible argument.
12048 *
12049 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12050 * @param a0 The first extra argument.
12051 */
12052#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12053 do { \
12054 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12055 } while (0)
12056
12057/**
12058 * Calls a FPU assembly implementation taking two visible arguments.
12059 *
12060 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12061 * @param a0 The first extra argument.
12062 * @param a1 The second extra argument.
12063 */
12064#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12065 do { \
12066 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12067 } while (0)
12068
12069/**
12070 * Calls a FPU assembly implementation taking three visible arguments.
12071 *
12072 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12073 * @param a0 The first extra argument.
12074 * @param a1 The second extra argument.
12075 * @param a2 The third extra argument.
12076 */
12077#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12078 do { \
12079 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12080 } while (0)
12081
12082#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12083 do { \
12084 (a_FpuData).FSW = (a_FSW); \
12085 (a_FpuData).r80Result = *(a_pr80Value); \
12086 } while (0)
12087
12088/** Pushes FPU result onto the stack. */
12089#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12090 iemFpuPushResult(pVCpu, &a_FpuData)
12091/** Pushes FPU result onto the stack and sets the FPUDP. */
12092#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12093 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12094
12095/** Replaces ST0 with the first result value and pushes the second onto the FPU stack. */
12096#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12097 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12098
12099/** Stores FPU result in a stack register. */
12100#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12101 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12102/** Stores FPU result in a stack register and pops the stack. */
12103#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12104 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12105/** Stores FPU result in a stack register and sets the FPUDP. */
12106#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12107 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12108/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12109 * stack. */
12110#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12111 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12112
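/*
 * Illustrative sketch, hypothetical helper name: a typical "fxxx st0,stN" body
 * prepares the FPU, checks that both stack registers hold values, calls the
 * assembly worker and stores the packed IEMFPURESULT, falling back to the
 * stack-underflow path otherwise.  IEM_MC_PREPARE_FPU_USAGE,
 * IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80, IEM_MC_ELSE/IEM_MC_ENDIF and
 * IEM_MC_FPU_STACK_UNDERFLOW are defined further down in this section; the
 * remaining macros and types are assumed from earlier in the IEM sources.
 */
#if 0 /* example only */
FNIEMOP_DEF_2(iemOpHlpExampleFpu_st0_stN, uint8_t, bRm, PFNIEMAIMPLFPUR80, pfnAImpl)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 1);
    IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
        IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
        IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif
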
12113/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12114#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12115 iemFpuUpdateOpcodeAndIp(pVCpu)
12116/** Free a stack register (for FFREE and FFREEP). */
12117#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12118 iemFpuStackFree(pVCpu, a_iStReg)
12119/** Increment the FPU stack pointer. */
12120#define IEM_MC_FPU_STACK_INC_TOP() \
12121 iemFpuStackIncTop(pVCpu)
12122/** Decrement the FPU stack pointer. */
12123#define IEM_MC_FPU_STACK_DEC_TOP() \
12124 iemFpuStackDecTop(pVCpu)
12125
12126/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12127#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12128 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12129/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12130#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12131 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12132/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12133#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12134 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12135/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12136#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12137 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12138/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12139 * stack. */
12140#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12141 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12142/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12143#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12144 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12145
12146/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12147#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12148 iemFpuStackUnderflow(pVCpu, a_iStDst)
12149/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12150 * stack. */
12151#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12152 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12153/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12154 * FPUDS. */
12155#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12156 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12157/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12158 * FPUDS. Pops stack. */
12159#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12160 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12161/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12162 * stack twice. */
12163#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12164 iemFpuStackUnderflowThenPopPop(pVCpu)
12165/** Raises a FPU stack underflow exception for an instruction pushing a result
12166 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12167#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12168 iemFpuStackPushUnderflow(pVCpu)
12169/** Raises a FPU stack underflow exception for an instruction pushing a result
12170 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12171#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12172 iemFpuStackPushUnderflowTwo(pVCpu)
12173
12174/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12175 * FPUIP, FPUCS and FOP. */
12176#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12177 iemFpuStackPushOverflow(pVCpu)
12178/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12179 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12180#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12181 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12182/** Prepares for using the FPU state.
12183 * Ensures that we can use the host FPU in the current context (RC+R0).
12184 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12185#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12186/** Actualizes the guest FPU state so it can be accessed in read-only fashion. */
12187#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12188/** Actualizes the guest FPU state so it can be accessed and modified. */
12189#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12190
12191/** Prepares for using the SSE state.
12192 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12193 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12194#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12195/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12196#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12197/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12198#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12199
12200/** Prepares for using the AVX state.
12201 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12202 * Ensures the guest AVX state in the CPUMCTX is up to date.
12203 * @note This will include the AVX512 state too when support for it is added
12204 * due to the zero-extending feature of VEX instructions. */
12205#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12206/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12207#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12208/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12209#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12210
12211/**
12212 * Calls a MMX assembly implementation taking two visible arguments.
12213 *
12214 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12215 * @param a0 The first extra argument.
12216 * @param a1 The second extra argument.
12217 */
12218#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12219 do { \
12220 IEM_MC_PREPARE_FPU_USAGE(); \
12221 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12222 } while (0)
12223
12224/**
12225 * Calls a MMX assembly implementation taking three visible arguments.
12226 *
12227 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12228 * @param a0 The first extra argument.
12229 * @param a1 The second extra argument.
12230 * @param a2 The third extra argument.
12231 */
12232#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12233 do { \
12234 IEM_MC_PREPARE_FPU_USAGE(); \
12235 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12236 } while (0)
12237
12238
12239/**
12240 * Calls a SSE assembly implementation taking two visible arguments.
12241 *
12242 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12243 * @param a0 The first extra argument.
12244 * @param a1 The second extra argument.
12245 */
12246#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12247 do { \
12248 IEM_MC_PREPARE_SSE_USAGE(); \
12249 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12250 } while (0)
12251
12252/**
12253 * Calls a SSE assembly implementation taking three visible arguments.
12254 *
12255 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12256 * @param a0 The first extra argument.
12257 * @param a1 The second extra argument.
12258 * @param a2 The third extra argument.
12259 */
12260#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12261 do { \
12262 IEM_MC_PREPARE_SSE_USAGE(); \
12263 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12264 } while (0)
12265
12266
12267/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12268 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12269#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12270 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12271
12272/**
12273 * Calls an AVX assembly implementation taking two visible arguments.
12274 *
12275 * There is one implicit zero'th argument, a pointer to the extended state.
12276 *
12277 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12278 * @param a1 The first extra argument.
12279 * @param a2 The second extra argument.
12280 */
12281#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12282 do { \
12283 IEM_MC_PREPARE_AVX_USAGE(); \
12284 a_pfnAImpl(pXState, (a1), (a2)); \
12285 } while (0)
12286
12287/**
12288 * Calls an AVX assembly implementation taking three visible arguments.
12289 *
12290 * There is one implicit zero'th argument, a pointer to the extended state.
12291 *
12292 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12293 * @param a1 The first extra argument.
12294 * @param a2 The second extra argument.
12295 * @param a3 The third extra argument.
12296 */
12297#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12298 do { \
12299 IEM_MC_PREPARE_AVX_USAGE(); \
12300 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12301 } while (0)
12302
12303/** @note Not for IOPL or IF testing. */
12304#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12305/** @note Not for IOPL or IF testing. */
12306#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12307/** @note Not for IOPL or IF testing. */
12308#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12309/** @note Not for IOPL or IF testing. */
12310#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12311/** @note Not for IOPL or IF testing. */
12312#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12313 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12314 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12315/** @note Not for IOPL or IF testing. */
12316#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12317 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12318 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12319/** @note Not for IOPL or IF testing. */
12320#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12321 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12322 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12323 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12324/** @note Not for IOPL or IF testing. */
12325#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12326 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12327 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12328 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12329#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12330#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12331#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12332/** @note Not for IOPL or IF testing. */
12333#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12334 if ( pVCpu->cpum.GstCtx.cx != 0 \
12335 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12336/** @note Not for IOPL or IF testing. */
12337#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12338 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12339 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12340/** @note Not for IOPL or IF testing. */
12341#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12342 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12343 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12344/** @note Not for IOPL or IF testing. */
12345#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12346 if ( pVCpu->cpum.GstCtx.cx != 0 \
12347 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12348/** @note Not for IOPL or IF testing. */
12349#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12350 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12351 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12352/** @note Not for IOPL or IF testing. */
12353#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12354 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12355 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12356#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12357#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12358
12359#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12360 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12361#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12362 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12363#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12364 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12365#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12366 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12367#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12368 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12369#define IEM_MC_IF_FCW_IM() \
12370 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12371
12372#define IEM_MC_ELSE() } else {
12373#define IEM_MC_ENDIF() } do {} while (0)
12374
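/*
 * Illustrative sketch, hypothetical decoder name: the EFLAGS test macros above
 * pair with IEM_MC_ELSE/IEM_MC_ENDIF to form ordinary if/else blocks, e.g. for
 * a "jz rel8" style instruction.  IEM_MC_REL_JMP_S8, IEM_OPCODE_GET_NEXT_S8,
 * IEM_MC_ADVANCE_RIP and X86_EFL_ZF are assumed to be available from elsewhere
 * in the sources.
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example_jz_Jb)
{
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif
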
12375/** @} */
12376
12377
12378/** @name Opcode Debug Helpers.
12379 * @{
12380 */
12381#ifdef VBOX_WITH_STATISTICS
12382# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12383#else
12384# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12385#endif
12386
12387#ifdef DEBUG
12388# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12389 do { \
12390 IEMOP_INC_STATS(a_Stats); \
12391 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12392 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12393 } while (0)
12394
12395# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12396 do { \
12397 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12398 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12399 (void)RT_CONCAT(OP_,a_Upper); \
12400 (void)(a_fDisHints); \
12401 (void)(a_fIemHints); \
12402 } while (0)
12403
12404# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12405 do { \
12406 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12407 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12408 (void)RT_CONCAT(OP_,a_Upper); \
12409 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12410 (void)(a_fDisHints); \
12411 (void)(a_fIemHints); \
12412 } while (0)
12413
12414# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12415 do { \
12416 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12417 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12418 (void)RT_CONCAT(OP_,a_Upper); \
12419 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12420 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12421 (void)(a_fDisHints); \
12422 (void)(a_fIemHints); \
12423 } while (0)
12424
12425# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12426 do { \
12427 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12428 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12429 (void)RT_CONCAT(OP_,a_Upper); \
12430 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12431 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12432 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12433 (void)(a_fDisHints); \
12434 (void)(a_fIemHints); \
12435 } while (0)
12436
12437# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12438 do { \
12439 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12440 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12441 (void)RT_CONCAT(OP_,a_Upper); \
12442 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12443 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12444 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12445 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12446 (void)(a_fDisHints); \
12447 (void)(a_fIemHints); \
12448 } while (0)
12449
12450#else
12451# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12452
12453# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12454 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12455# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12456 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12457# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12458 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12459# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12460 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12461# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12462 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12463
12464#endif
12465
12466#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12467 IEMOP_MNEMONIC0EX(a_Lower, \
12468 #a_Lower, \
12469 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12470#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12471 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12472 #a_Lower " " #a_Op1, \
12473 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12474#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12475 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12476 #a_Lower " " #a_Op1 "," #a_Op2, \
12477 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12478#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12479 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12480 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12481 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12482#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12483 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12484 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12485 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
12486
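/*
 * Illustrative sketch, hypothetical decoder name: a decoder function normally
 * opens with one of the IEMOP_MNEMONIC* wrappers, which bumps the
 * per-instruction statistics counter and, in debug builds, emits the Log4
 * decode line.  The RM/ADD/Gv/Ev/DISOPTYPE_HARMLESS tokens are assumed to
 * exist as IEMOPFORM_/OP_/OP_PARM_/DIS constants elsewhere in the sources.
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example_add_Gv_Ev)
{
    /* Expands to IEMOP_MNEMONIC2EX(add_Gv_Ev, "add Gv,Ev", ...). */
    IEMOP_MNEMONIC2(RM, ADD, add, Gv, Ev, DISOPTYPE_HARMLESS, 0);
    /* ...ModR/M decoding and the IEM_MC body would follow here... */
    return VINF_SUCCESS;
}
#endif
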
12487/** @} */
12488
12489
12490/** @name Opcode Helpers.
12491 * @{
12492 */
12493
12494#ifdef IN_RING3
12495# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12496 do { \
12497 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12498 else \
12499 { \
12500 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12501 return IEMOP_RAISE_INVALID_OPCODE(); \
12502 } \
12503 } while (0)
12504#else
12505# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12506 do { \
12507 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12508 else return IEMOP_RAISE_INVALID_OPCODE(); \
12509 } while (0)
12510#endif
12511
12512/** The instruction requires a 186 or later. */
12513#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12514# define IEMOP_HLP_MIN_186() do { } while (0)
12515#else
12516# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12517#endif
12518
12519/** The instruction requires a 286 or later. */
12520#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12521# define IEMOP_HLP_MIN_286() do { } while (0)
12522#else
12523# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12524#endif
12525
12526/** The instruction requires a 386 or later. */
12527#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12528# define IEMOP_HLP_MIN_386() do { } while (0)
12529#else
12530# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12531#endif
12532
12533/** The instruction requires a 386 or later if the given expression is true. */
12534#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12535# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12536#else
12537# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12538#endif
12539
12540/** The instruction requires a 486 or later. */
12541#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12542# define IEMOP_HLP_MIN_486() do { } while (0)
12543#else
12544# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12545#endif
12546
12547/** The instruction requires a Pentium (586) or later. */
12548#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12549# define IEMOP_HLP_MIN_586() do { } while (0)
12550#else
12551# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12552#endif
12553
12554/** The instruction requires a PentiumPro (686) or later. */
12555#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12556# define IEMOP_HLP_MIN_686() do { } while (0)
12557#else
12558# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12559#endif
12560
12561
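/*
 * Illustrative sketch, hypothetical decoder name: the IEMOP_HLP_MIN_* helpers
 * are dropped into decoders for encodings that did not exist on earlier CPUs,
 * so a configured pre-386 target CPU raises #UD instead of executing them.
 * They compile to nothing when IEM_CFG_TARGET_CPU already guarantees the
 * minimum.
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example_movzx_Gv_Eb)
{
    IEMOP_MNEMONIC(movzx_Gv_Eb, "movzx Gv,Eb");
    IEMOP_HLP_MIN_386(); /* 0F B6 /r first appeared on the 386. */
    /* ...decode ModR/M and emit the IEM_MC body... */
    return VINF_SUCCESS;
}
#endif
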
12562/** The instruction raises an \#UD in real and V8086 mode. */
12563#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12564 do \
12565 { \
12566 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12567 else return IEMOP_RAISE_INVALID_OPCODE(); \
12568 } while (0)
12569
12570#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12571/** This instruction raises an \#UD in real and V8086 mode, or when in long mode
12572 * without a 64-bit code segment (applicable to all VMX instructions except
12573 * VMCALL). */
12574# define IEMOP_HLP_VMX_INSTR() \
12575 do \
12576 { \
12577 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12578 && ( !IEM_IS_LONG_MODE(pVCpu) \
12579 || IEM_IS_64BIT_CODE(pVCpu))) \
12580 { /* likely */ } \
12581 else \
12582 return IEMOP_RAISE_INVALID_OPCODE(); \
12583 } while (0)
12584
12585/** The instruction can only be executed in VMX operation (VMX root mode and
12586 * non-root mode).
12587 */
12588# define IEMOP_HLP_IN_VMX_OPERATION() \
12589 do \
12590 { \
12591 if (IEM_IS_VMX_ROOT_MODE(pVCpu)) { /* likely */ } \
12592 else return IEMOP_RAISE_INVALID_OPCODE(); \
12593 } while (0)
12594#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12595
12596/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12597 * 64-bit mode. */
12598#define IEMOP_HLP_NO_64BIT() \
12599 do \
12600 { \
12601 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12602 return IEMOP_RAISE_INVALID_OPCODE(); \
12603 } while (0)
12604
12605/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12606 * 64-bit mode. */
12607#define IEMOP_HLP_ONLY_64BIT() \
12608 do \
12609 { \
12610 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12611 return IEMOP_RAISE_INVALID_OPCODE(); \
12612 } while (0)
12613
12614/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12615#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12616 do \
12617 { \
12618 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12619 iemRecalEffOpSize64Default(pVCpu); \
12620 } while (0)
12621
12622/** The instruction has 64-bit operand size if 64-bit mode. */
12623#define IEMOP_HLP_64BIT_OP_SIZE() \
12624 do \
12625 { \
12626 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12627 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12628 } while (0)
12629
12630/** Only a REX prefix immediately preceding the first opcode byte takes
12631 * effect. This macro helps ensure this and logs bad guest code. */
12632#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12633 do \
12634 { \
12635 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12636 { \
12637 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12638 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12639 pVCpu->iem.s.uRexB = 0; \
12640 pVCpu->iem.s.uRexIndex = 0; \
12641 pVCpu->iem.s.uRexReg = 0; \
12642 iemRecalEffOpSize(pVCpu); \
12643 } \
12644 } while (0)
12645
12646/**
12647 * Done decoding.
12648 */
12649#define IEMOP_HLP_DONE_DECODING() \
12650 do \
12651 { \
12652 /*nothing for now, maybe later... */ \
12653 } while (0)
12654
12655/**
12656 * Done decoding, raise \#UD exception if lock prefix present.
12657 */
12658#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12659 do \
12660 { \
12661 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12662 { /* likely */ } \
12663 else \
12664 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12665 } while (0)
12666
12667
12668/**
12669 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12670 * repnz or size prefixes are present, or if in real or v8086 mode.
12671 */
12672#define IEMOP_HLP_DONE_VEX_DECODING() \
12673 do \
12674 { \
12675 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12676 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12677 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12678 { /* likely */ } \
12679 else \
12680 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12681 } while (0)
12682
12683/**
12684 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12685 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not 0.
12686 */
12687#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12688 do \
12689 { \
12690 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12691 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12692 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12693 && pVCpu->iem.s.uVexLength == 0)) \
12694 { /* likely */ } \
12695 else \
12696 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12697 } while (0)
12698
12699
12700/**
12701 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12702 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12703 * register 0, or if in real or v8086 mode.
12704 */
12705#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12706 do \
12707 { \
12708 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12709 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12710 && !pVCpu->iem.s.uVex3rdReg \
12711 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12712 { /* likely */ } \
12713 else \
12714 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12715 } while (0)
12716
12717/**
12718 * Done decoding VEX, no V, L=0.
12719 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12720 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12721 */
12722#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12723 do \
12724 { \
12725 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12726 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12727 && pVCpu->iem.s.uVexLength == 0 \
12728 && pVCpu->iem.s.uVex3rdReg == 0 \
12729 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12730 { /* likely */ } \
12731 else \
12732 return IEMOP_RAISE_INVALID_OPCODE(); \
12733 } while (0)
12734
12735#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12736 do \
12737 { \
12738 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12739 { /* likely */ } \
12740 else \
12741 { \
12742 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12743 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12744 } \
12745 } while (0)
12746#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12747 do \
12748 { \
12749 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12750 { /* likely */ } \
12751 else \
12752 { \
12753 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12754 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12755 } \
12756 } while (0)
12757
12758/**
12759 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12760 * are present.
12761 */
12762#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12763 do \
12764 { \
12765 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12766 { /* likely */ } \
12767 else \
12768 return IEMOP_RAISE_INVALID_OPCODE(); \
12769 } while (0)
12770
12771/**
12772 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12773 * prefixes are present.
12774 */
12775#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12776 do \
12777 { \
12778 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12779 { /* likely */ } \
12780 else \
12781 return IEMOP_RAISE_INVALID_OPCODE(); \
12782 } while (0)
12783
12784
12785/**
12786 * Calculates the effective address of a ModR/M memory operand.
12787 *
12788 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12789 *
12790 * @return Strict VBox status code.
12791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12792 * @param bRm The ModRM byte.
12793 * @param cbImm The size of any immediate following the
12794 * effective address opcode bytes. Important for
12795 * RIP relative addressing.
12796 * @param pGCPtrEff Where to return the effective address.
12797 */
12798IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12799{
12800 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12801# define SET_SS_DEF() \
12802 do \
12803 { \
12804 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12805 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12806 } while (0)
12807
12808 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12809 {
12810/** @todo Check the effective address size crap! */
12811 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12812 {
12813 uint16_t u16EffAddr;
12814
12815 /* Handle the disp16 form with no registers first. */
12816 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12817 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12818 else
12819 {
12820 /* Get the displacement. */
12821 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12822 {
12823 case 0: u16EffAddr = 0; break;
12824 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12825 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12826 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12827 }
12828
12829 /* Add the base and index registers to the disp. */
12830 switch (bRm & X86_MODRM_RM_MASK)
12831 {
12832 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12833 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12834 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12835 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12836 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12837 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12838 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12839 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12840 }
12841 }
12842
12843 *pGCPtrEff = u16EffAddr;
12844 }
12845 else
12846 {
12847 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12848 uint32_t u32EffAddr;
12849
12850 /* Handle the disp32 form with no registers first. */
12851 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12852 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12853 else
12854 {
12855 /* Get the register (or SIB) value. */
12856 switch ((bRm & X86_MODRM_RM_MASK))
12857 {
12858 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12859 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12860 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12861 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12862 case 4: /* SIB */
12863 {
12864 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12865
12866 /* Get the index and scale it. */
12867 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12868 {
12869 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12870 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12871 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12872 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12873 case 4: u32EffAddr = 0; /*none */ break;
12874 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12875 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12876 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12877 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12878 }
12879 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12880
12881 /* add base */
12882 switch (bSib & X86_SIB_BASE_MASK)
12883 {
12884 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12885 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12886 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12887 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12888 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12889 case 5:
12890 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12891 {
12892 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12893 SET_SS_DEF();
12894 }
12895 else
12896 {
12897 uint32_t u32Disp;
12898 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12899 u32EffAddr += u32Disp;
12900 }
12901 break;
12902 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12903 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12904 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12905 }
12906 break;
12907 }
12908 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12909 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12910 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12911 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12912 }
12913
12914 /* Get and add the displacement. */
12915 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12916 {
12917 case 0:
12918 break;
12919 case 1:
12920 {
12921 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12922 u32EffAddr += i8Disp;
12923 break;
12924 }
12925 case 2:
12926 {
12927 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12928 u32EffAddr += u32Disp;
12929 break;
12930 }
12931 default:
12932 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12933 }
12934
12935 }
12936 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12937 *pGCPtrEff = u32EffAddr;
12938 else
12939 {
12940 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12941 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12942 }
12943 }
12944 }
12945 else
12946 {
12947 uint64_t u64EffAddr;
12948
12949 /* Handle the rip+disp32 form with no registers first. */
12950 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12951 {
12952 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12953 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12954 }
12955 else
12956 {
12957 /* Get the register (or SIB) value. */
12958 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12959 {
12960 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12961 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12962 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12963 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12964 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
12965 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
12966 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
12967 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
12968 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
12969 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
12970 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
12971 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
12972 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
12973 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
12974 /* SIB */
12975 case 4:
12976 case 12:
12977 {
12978 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12979
12980 /* Get the index and scale it. */
12981 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12982 {
12983 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12984 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12985 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12986 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12987 case 4: u64EffAddr = 0; /*none */ break;
12988 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
12989 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
12990 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
12991 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
12992 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
12993 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
12994 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
12995 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
12996 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
12997 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
12998 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
12999 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13000 }
13001 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13002
13003 /* add base */
13004 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13005 {
13006 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13007 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13008 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13009 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13010 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13011 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13012 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13013 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13014 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13015 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13016 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13017 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13018 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13019 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13020 /* complicated encodings */
13021 case 5:
13022 case 13:
13023 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13024 {
13025 if (!pVCpu->iem.s.uRexB)
13026 {
13027 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13028 SET_SS_DEF();
13029 }
13030 else
13031 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13032 }
13033 else
13034 {
13035 uint32_t u32Disp;
13036 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13037 u64EffAddr += (int32_t)u32Disp;
13038 }
13039 break;
13040 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13041 }
13042 break;
13043 }
13044 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13045 }
13046
13047 /* Get and add the displacement. */
13048 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13049 {
13050 case 0:
13051 break;
13052 case 1:
13053 {
13054 int8_t i8Disp;
13055 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13056 u64EffAddr += i8Disp;
13057 break;
13058 }
13059 case 2:
13060 {
13061 uint32_t u32Disp;
13062 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13063 u64EffAddr += (int32_t)u32Disp;
13064 break;
13065 }
13066 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13067 }
13068
13069 }
13070
13071 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13072 *pGCPtrEff = u64EffAddr;
13073 else
13074 {
13075 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13076 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13077 }
13078 }
13079
13080 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13081 return VINF_SUCCESS;
13082}
13083
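/*
 * Worked example (added for illustration): with a 16-bit effective address size
 * and bRm = 0x42 (mod=1, reg=0, rm=2), the code above takes the mod=1 branch,
 * fetches a sign-extended 8-bit displacement and adds BP+SI for rm=2, giving
 * GCPtrEff = BP + SI + disp8 with SS as the default segment (SET_SS_DEF).  In
 * 64-bit mode, bRm = 0x05 (mod=0, rm=5) selects the rip+disp32 form instead:
 * the sign-extended disp32 is added to the address of the next instruction,
 * i.e. the current RIP plus the bytes decoded so far plus cbImm for any
 * trailing immediate.
 */
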
13084
13085/**
13086 * Calculates the effective address of a ModR/M memory operand.
13087 *
13088 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13089 *
13090 * @return Strict VBox status code.
13091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13092 * @param bRm The ModRM byte.
13093 * @param cbImm The size of any immediate following the
13094 * effective address opcode bytes. Important for
13095 * RIP relative addressing.
13096 * @param pGCPtrEff Where to return the effective address.
13097 * @param offRsp RSP displacement.
13098 */
13099IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13100{
13101 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13102# define SET_SS_DEF() \
13103 do \
13104 { \
13105 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13106 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13107 } while (0)
13108
13109 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13110 {
13111/** @todo Check the effective address size crap! */
13112 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13113 {
13114 uint16_t u16EffAddr;
13115
13116 /* Handle the disp16 form with no registers first. */
13117 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13118 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13119 else
13120 {
13121 /* Get the displacement. */
13122 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13123 {
13124 case 0: u16EffAddr = 0; break;
13125 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13126 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13127 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13128 }
13129
13130 /* Add the base and index registers to the disp. */
13131 switch (bRm & X86_MODRM_RM_MASK)
13132 {
13133 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13134 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13135 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13136 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13137 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13138 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13139 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13140 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13141 }
13142 }
13143
13144 *pGCPtrEff = u16EffAddr;
13145 }
13146 else
13147 {
13148 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13149 uint32_t u32EffAddr;
13150
13151 /* Handle the disp32 form with no registers first. */
13152 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13153 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13154 else
13155 {
13156 /* Get the register (or SIB) value. */
13157 switch ((bRm & X86_MODRM_RM_MASK))
13158 {
13159 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13160 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13161 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13162 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13163 case 4: /* SIB */
13164 {
13165 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13166
13167 /* Get the index and scale it. */
13168 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13169 {
13170 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13171 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13172 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13173 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13174 case 4: u32EffAddr = 0; /*none */ break;
13175 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13176 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13177 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13178 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13179 }
13180 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13181
13182 /* add base */
13183 switch (bSib & X86_SIB_BASE_MASK)
13184 {
13185 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13186 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13187 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13188 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13189 case 4:
13190 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13191 SET_SS_DEF();
13192 break;
13193 case 5:
13194 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13195 {
13196 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13197 SET_SS_DEF();
13198 }
13199 else
13200 {
13201 uint32_t u32Disp;
13202 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13203 u32EffAddr += u32Disp;
13204 }
13205 break;
13206 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13207 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13208 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13209 }
13210 break;
13211 }
13212 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13213 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13214 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13215 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13216 }
13217
13218 /* Get and add the displacement. */
13219 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13220 {
13221 case 0:
13222 break;
13223 case 1:
13224 {
13225 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13226 u32EffAddr += i8Disp;
13227 break;
13228 }
13229 case 2:
13230 {
13231 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13232 u32EffAddr += u32Disp;
13233 break;
13234 }
13235 default:
13236 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13237 }
13238
13239 }
13240 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13241 *pGCPtrEff = u32EffAddr;
13242 else
13243 {
13244 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13245 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13246 }
13247 }
13248 }
13249 else
13250 {
13251 uint64_t u64EffAddr;
13252
13253 /* Handle the rip+disp32 form with no registers first. */
13254 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13255 {
13256 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13257 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13258 }
13259 else
13260 {
13261 /* Get the register (or SIB) value. */
13262 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13263 {
13264 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13265 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13266 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13267 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13268 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13269 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13270 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13271 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13272 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13273 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13274 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13275 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13276 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13277 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13278 /* SIB */
13279 case 4:
13280 case 12:
13281 {
13282 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13283
13284 /* Get the index and scale it. */
13285 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13286 {
13287 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13288 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13289 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13290 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13291 case 4: u64EffAddr = 0; /*none */ break;
13292 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13293 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13294 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13295 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13296 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13297 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13298 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13299 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13300 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13301 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13302 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13303 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13304 }
13305 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13306
13307 /* add base */
13308 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13309 {
13310 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13311 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13312 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13313 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13314 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13315 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13316 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13317 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13318 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13319 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13320 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13321 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13322 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13323 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13324 /* complicated encodings */
13325 case 5:
13326 case 13:
13327 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13328 {
13329 if (!pVCpu->iem.s.uRexB)
13330 {
13331 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13332 SET_SS_DEF();
13333 }
13334 else
13335 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13336 }
13337 else
13338 {
13339 uint32_t u32Disp;
13340 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13341 u64EffAddr += (int32_t)u32Disp;
13342 }
13343 break;
13344 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13345 }
13346 break;
13347 }
13348 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13349 }
13350
13351 /* Get and add the displacement. */
13352 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13353 {
13354 case 0:
13355 break;
13356 case 1:
13357 {
13358 int8_t i8Disp;
13359 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13360 u64EffAddr += i8Disp;
13361 break;
13362 }
13363 case 2:
13364 {
13365 uint32_t u32Disp;
13366 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13367 u64EffAddr += (int32_t)u32Disp;
13368 break;
13369 }
13370 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13371 }
13372
13373 }
13374
13375 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13376 *pGCPtrEff = u64EffAddr;
13377 else
13378 {
13379 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13380 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13381 }
13382 }
13383
13384 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13385 return VINF_SUCCESS;
13386}
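/*
 * Illustrative note (not part of the original source): a worked example of the
 * 32-bit ModR/M + SIB decoding performed by the helper above, assuming the
 * standard x86 encoding rules.
 *
 * @code
 *      bRm   = 0x44   -> mod=01 (disp8 follows), reg=000, rm=100 (SIB byte follows)
 *      bSib  = 0x88   -> scale=10 (index * 4), index=001 (ECX), base=000 (EAX)
 *      disp8 = 0x10
 *      u32EffAddr = EAX + ECX * 4 + 0x10; DS remains the default segment since
 *      neither the EBP nor the ESP base case is taken (SET_SS_DEF() not invoked).
 * @endcode
 */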
13387
13388
13389#ifdef IEM_WITH_SETJMP
13390/**
13391 * Calculates the effective address of a ModR/M memory operand.
13392 *
13393 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13394 *
13395 * May longjmp on internal error.
13396 *
13397 * @return The effective address.
13398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13399 * @param bRm The ModRM byte.
13400 * @param cbImm The size of any immediate following the
13401 * effective address opcode bytes. Important for
13402 * RIP relative addressing.
13403 */
13404IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13405{
13406 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13407# define SET_SS_DEF() \
13408 do \
13409 { \
13410 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13411 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13412 } while (0)
13413
13414 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13415 {
13416/** @todo Check the effective address size crap! */
13417 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13418 {
13419 uint16_t u16EffAddr;
13420
13421 /* Handle the disp16 form with no registers first. */
13422 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13423 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13424 else
13425 {
13426                 /* Get the displacement. */
13427 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13428 {
13429 case 0: u16EffAddr = 0; break;
13430 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13431 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13432 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13433 }
13434
13435 /* Add the base and index registers to the disp. */
13436 switch (bRm & X86_MODRM_RM_MASK)
13437 {
13438 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13439 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13440 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13441 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13442 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13443 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13444 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13445 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13446 }
13447 }
13448
13449 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13450 return u16EffAddr;
13451 }
13452
13453 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13454 uint32_t u32EffAddr;
13455
13456 /* Handle the disp32 form with no registers first. */
13457 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13458 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13459 else
13460 {
13461 /* Get the register (or SIB) value. */
13462 switch ((bRm & X86_MODRM_RM_MASK))
13463 {
13464 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13465 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13466 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13467 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13468 case 4: /* SIB */
13469 {
13470 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13471
13472 /* Get the index and scale it. */
13473 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13474 {
13475 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13476 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13477 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13478 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13479 case 4: u32EffAddr = 0; /*none */ break;
13480 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13481 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13482 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13483 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13484 }
13485 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13486
13487 /* add base */
13488 switch (bSib & X86_SIB_BASE_MASK)
13489 {
13490 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13491 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13492 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13493 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13494 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13495 case 5:
13496 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13497 {
13498 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13499 SET_SS_DEF();
13500 }
13501 else
13502 {
13503 uint32_t u32Disp;
13504 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13505 u32EffAddr += u32Disp;
13506 }
13507 break;
13508 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13509 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13510 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13511 }
13512 break;
13513 }
13514 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13515 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13516 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13517 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13518 }
13519
13520 /* Get and add the displacement. */
13521 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13522 {
13523 case 0:
13524 break;
13525 case 1:
13526 {
13527 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13528 u32EffAddr += i8Disp;
13529 break;
13530 }
13531 case 2:
13532 {
13533 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13534 u32EffAddr += u32Disp;
13535 break;
13536 }
13537 default:
13538 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13539 }
13540 }
13541
13542 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13543 {
13544 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13545 return u32EffAddr;
13546 }
13547 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13548 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13549 return u32EffAddr & UINT16_MAX;
13550 }
13551
13552 uint64_t u64EffAddr;
13553
13554 /* Handle the rip+disp32 form with no registers first. */
13555 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13556 {
13557 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13558 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13559 }
13560 else
13561 {
13562 /* Get the register (or SIB) value. */
13563 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13564 {
13565 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13566 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13567 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13568 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13569 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13570 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13571 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13572 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13573 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13574 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13575 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13576 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13577 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13578 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13579 /* SIB */
13580 case 4:
13581 case 12:
13582 {
13583 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13584
13585 /* Get the index and scale it. */
13586 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13587 {
13588 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13589 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13590 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13591 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13592 case 4: u64EffAddr = 0; /*none */ break;
13593 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13594 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13595 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13596 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13597 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13598 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13599 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13600 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13601 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13602 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13603 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13604 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13605 }
13606 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13607
13608 /* add base */
13609 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13610 {
13611 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13612 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13613 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13614 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13615 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13616 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13617 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13618 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13619 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13620 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13621 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13622 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13623 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13624 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13625 /* complicated encodings */
13626 case 5:
13627 case 13:
13628 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13629 {
13630 if (!pVCpu->iem.s.uRexB)
13631 {
13632 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13633 SET_SS_DEF();
13634 }
13635 else
13636 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13637 }
13638 else
13639 {
13640 uint32_t u32Disp;
13641 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13642 u64EffAddr += (int32_t)u32Disp;
13643 }
13644 break;
13645 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13646 }
13647 break;
13648 }
13649 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13650 }
13651
13652 /* Get and add the displacement. */
13653 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13654 {
13655 case 0:
13656 break;
13657 case 1:
13658 {
13659 int8_t i8Disp;
13660 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13661 u64EffAddr += i8Disp;
13662 break;
13663 }
13664 case 2:
13665 {
13666 uint32_t u32Disp;
13667 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13668 u64EffAddr += (int32_t)u32Disp;
13669 break;
13670 }
13671 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13672 }
13673
13674 }
13675
13676 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13677 {
13678 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13679 return u64EffAddr;
13680 }
13681 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13682 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13683 return u64EffAddr & UINT32_MAX;
13684}
13685#endif /* IEM_WITH_SETJMP */
13686
13687/** @} */
13688
13689
13690
13691/*
13692 * Include the instructions
13693 */
13694#include "IEMAllInstructions.cpp.h"
13695
13696
13697
13698#ifdef LOG_ENABLED
13699/**
13700 * Logs the current instruction.
13701 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13702 * @param fSameCtx Set if we have the same context information as the VMM,
13703 * clear if we may have already executed an instruction in
13704 * our debug context. When clear, we assume IEMCPU holds
13705 * valid CPU mode info.
13706 *
13707 * The @a fSameCtx parameter is now misleading and obsolete.
13708 * @param pszFunction The IEM function doing the execution.
13709 */
13710IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13711{
13712# ifdef IN_RING3
13713 if (LogIs2Enabled())
13714 {
13715 char szInstr[256];
13716 uint32_t cbInstr = 0;
13717 if (fSameCtx)
13718 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13719 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13720 szInstr, sizeof(szInstr), &cbInstr);
13721 else
13722 {
13723 uint32_t fFlags = 0;
13724 switch (pVCpu->iem.s.enmCpuMode)
13725 {
13726 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13727 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13728 case IEMMODE_16BIT:
13729 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13730 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13731 else
13732 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13733 break;
13734 }
13735 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13736 szInstr, sizeof(szInstr), &cbInstr);
13737 }
13738
13739 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13740 Log2(("**** %s\n"
13741 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13742 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13743 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13744 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13745 " %s\n"
13746 , pszFunction,
13747 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13748 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13749 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13750 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13751 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13752 szInstr));
13753
13754 if (LogIs3Enabled())
13755 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13756 }
13757 else
13758# endif
13759 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13760 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13761 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13762}
13763#endif /* LOG_ENABLED */
13764
13765
13766/**
13767 * Makes status code adjustments (pass up from I/O and access handlers)
13768 * as well as maintaining statistics.
13769 *
13770 * @returns Strict VBox status code to pass up.
13771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13772 * @param rcStrict The status from executing an instruction.
13773 */
13774DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13775{
13776 if (rcStrict != VINF_SUCCESS)
13777 {
13778 if (RT_SUCCESS(rcStrict))
13779 {
13780 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13781 || rcStrict == VINF_IOM_R3_IOPORT_READ
13782 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13783 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13784 || rcStrict == VINF_IOM_R3_MMIO_READ
13785 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13786 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13787 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13788 || rcStrict == VINF_CPUM_R3_MSR_READ
13789 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13790 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13791 || rcStrict == VINF_EM_RAW_TO_R3
13792 || rcStrict == VINF_EM_TRIPLE_FAULT
13793 || rcStrict == VINF_GIM_R3_HYPERCALL
13794 /* raw-mode / virt handlers only: */
13795 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13796 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13797 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13798 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13799 || rcStrict == VINF_SELM_SYNC_GDT
13800 || rcStrict == VINF_CSAM_PENDING_ACTION
13801 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13802 /* nested hw.virt codes: */
13803 || rcStrict == VINF_SVM_VMEXIT
13804 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13805/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13806 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13807#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13808 if ( rcStrict == VINF_SVM_VMEXIT
13809 && rcPassUp == VINF_SUCCESS)
13810 rcStrict = VINF_SUCCESS;
13811 else
13812#endif
13813 if (rcPassUp == VINF_SUCCESS)
13814 pVCpu->iem.s.cRetInfStatuses++;
13815 else if ( rcPassUp < VINF_EM_FIRST
13816 || rcPassUp > VINF_EM_LAST
13817 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13818 {
13819 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13820 pVCpu->iem.s.cRetPassUpStatus++;
13821 rcStrict = rcPassUp;
13822 }
13823 else
13824 {
13825 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13826 pVCpu->iem.s.cRetInfStatuses++;
13827 }
13828 }
13829 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13830 pVCpu->iem.s.cRetAspectNotImplemented++;
13831 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13832 pVCpu->iem.s.cRetInstrNotImplemented++;
13833 else
13834 pVCpu->iem.s.cRetErrStatuses++;
13835 }
13836 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13837 {
13838 pVCpu->iem.s.cRetPassUpStatus++;
13839 rcStrict = pVCpu->iem.s.rcPassUp;
13840 }
13841
13842 return rcStrict;
13843}
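/*
 * Illustrative note (not part of the original source): ignoring the nested SVM
 * special case, the pass-up resolution above reduces to the following, going
 * by the code itself:
 *
 * @code
 *      rcPassUp == VINF_SUCCESS                          -> keep rcStrict
 *      rcPassUp outside [VINF_EM_FIRST..VINF_EM_LAST],
 *        or numerically smaller than rcStrict            -> return rcPassUp
 *      otherwise                                         -> keep rcStrict
 * @endcode
 */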
13844
13845
13846/**
13847 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13848 * IEMExecOneWithPrefetchedByPC.
13849 *
13850 * Similar code is found in IEMExecLots.
13851 *
13852 * @return Strict VBox status code.
13853 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13854 * @param fExecuteInhibit If set, execute the instruction following CLI,
13855 * POP SS and MOV SS,GR.
13856 * @param pszFunction The calling function name.
13857 */
13858DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
13859{
13860 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13861 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13862 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13863 RT_NOREF_PV(pszFunction);
13864
13865#ifdef IEM_WITH_SETJMP
13866 VBOXSTRICTRC rcStrict;
13867 jmp_buf JmpBuf;
13868 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13869 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13870 if ((rcStrict = setjmp(JmpBuf)) == 0)
13871 {
13872 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13873 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13874 }
13875 else
13876 pVCpu->iem.s.cLongJumps++;
13877 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13878#else
13879 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13880 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13881#endif
13882 if (rcStrict == VINF_SUCCESS)
13883 pVCpu->iem.s.cInstructions++;
13884 if (pVCpu->iem.s.cActiveMappings > 0)
13885 {
13886 Assert(rcStrict != VINF_SUCCESS);
13887 iemMemRollback(pVCpu);
13888 }
13889 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13890 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13891 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13892
13893//#ifdef DEBUG
13894// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13895//#endif
13896
13897 /* Execute the next instruction as well if a cli, pop ss or
13898 mov ss, Gr has just completed successfully. */
13899 if ( fExecuteInhibit
13900 && rcStrict == VINF_SUCCESS
13901 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13902 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
13903 {
13904 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13905 if (rcStrict == VINF_SUCCESS)
13906 {
13907#ifdef LOG_ENABLED
13908 iemLogCurInstr(pVCpu, false, pszFunction);
13909#endif
13910#ifdef IEM_WITH_SETJMP
13911 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13912 if ((rcStrict = setjmp(JmpBuf)) == 0)
13913 {
13914 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13915 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13916 }
13917 else
13918 pVCpu->iem.s.cLongJumps++;
13919 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13920#else
13921 IEM_OPCODE_GET_NEXT_U8(&b);
13922 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13923#endif
13924 if (rcStrict == VINF_SUCCESS)
13925 pVCpu->iem.s.cInstructions++;
13926 if (pVCpu->iem.s.cActiveMappings > 0)
13927 {
13928 Assert(rcStrict != VINF_SUCCESS);
13929 iemMemRollback(pVCpu);
13930 }
13931 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13932 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13933 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13934 }
13935 else if (pVCpu->iem.s.cActiveMappings > 0)
13936 iemMemRollback(pVCpu);
13937 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13938 }
13939
13940 /*
13941 * Return value fiddling, statistics and sanity assertions.
13942 */
13943 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13944
13945 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
13946 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
13947 return rcStrict;
13948}
13949
13950
13951#ifdef IN_RC
13952/**
13953 * Re-enters raw-mode or ensures we return to ring-3.
13954 *
13955 * @returns rcStrict, maybe modified.
13956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13957 * @param   rcStrict    The status code returned by the interpreter.
13958 */
13959DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13960{
13961 if ( !pVCpu->iem.s.fInPatchCode
13962 && ( rcStrict == VINF_SUCCESS
13963 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13964 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13965 {
13966 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13967 CPUMRawEnter(pVCpu);
13968 else
13969 {
13970 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13971 rcStrict = VINF_EM_RESCHEDULE;
13972 }
13973 }
13974 return rcStrict;
13975}
13976#endif
13977
13978
13979/**
13980 * Execute one instruction.
13981 *
13982 * @return Strict VBox status code.
13983 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13984 */
13985VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13986{
13987#ifdef LOG_ENABLED
13988 iemLogCurInstr(pVCpu, true, "IEMExecOne");
13989#endif
13990
13991 /*
13992 * Do the decoding and emulation.
13993 */
13994 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13995 if (rcStrict == VINF_SUCCESS)
13996 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
13997 else if (pVCpu->iem.s.cActiveMappings > 0)
13998 iemMemRollback(pVCpu);
13999
14000#ifdef IN_RC
14001 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14002#endif
14003 if (rcStrict != VINF_SUCCESS)
14004 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14005 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14006 return rcStrict;
14007}
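/*
 * Illustrative note (not part of the original source): a minimal caller-side
 * sketch for IEMExecOne; the cInstrsExecuted counter is purely hypothetical
 * and the status handling is simplified for the example.
 *
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict == VINF_SUCCESS)
 *          cInstrsExecuted++;                              // instruction completed
 *      else if (RT_SUCCESS(rcStrict))
 *          LogFlow(("defer to EM: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 *      else
 *          LogFlow(("error: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 * @endcode
 */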
14008
14009
14010VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14011{
14012 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14013
14014 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14015 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14016 if (rcStrict == VINF_SUCCESS)
14017 {
14018 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14019 if (pcbWritten)
14020 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14021 }
14022 else if (pVCpu->iem.s.cActiveMappings > 0)
14023 iemMemRollback(pVCpu);
14024
14025#ifdef IN_RC
14026 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14027#endif
14028 return rcStrict;
14029}
14030
14031
14032VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14033 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14034{
14035 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14036
14037 VBOXSTRICTRC rcStrict;
14038 if ( cbOpcodeBytes
14039 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14040 {
14041 iemInitDecoder(pVCpu, false);
14042#ifdef IEM_WITH_CODE_TLB
14043 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14044 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14045 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14046 pVCpu->iem.s.offCurInstrStart = 0;
14047 pVCpu->iem.s.offInstrNextByte = 0;
14048#else
14049 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14050 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14051#endif
14052 rcStrict = VINF_SUCCESS;
14053 }
14054 else
14055 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14056 if (rcStrict == VINF_SUCCESS)
14057 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14058 else if (pVCpu->iem.s.cActiveMappings > 0)
14059 iemMemRollback(pVCpu);
14060
14061#ifdef IN_RC
14062 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14063#endif
14064 return rcStrict;
14065}
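/*
 * Illustrative note (not part of the original source): a hypothetical caller
 * that already holds the opcode bytes at the current RIP (e.g. from an
 * instruction-fetch exit) can pass them in to skip the prefetch, roughly:
 *
 * @code
 *      uint8_t const abInstr[] = { 0x0f, 0xa2 };           // CPUID, example only
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
 *                                                           pVCpu->cpum.GstCtx.rip, abInstr, sizeof(abInstr));
 * @endcode
 */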
14066
14067
14068VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14069{
14070 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14071
14072 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14073 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14074 if (rcStrict == VINF_SUCCESS)
14075 {
14076 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14077 if (pcbWritten)
14078 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14079 }
14080 else if (pVCpu->iem.s.cActiveMappings > 0)
14081 iemMemRollback(pVCpu);
14082
14083#ifdef IN_RC
14084 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14085#endif
14086 return rcStrict;
14087}
14088
14089
14090VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14091 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14092{
14093 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14094
14095 VBOXSTRICTRC rcStrict;
14096 if ( cbOpcodeBytes
14097 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14098 {
14099 iemInitDecoder(pVCpu, true);
14100#ifdef IEM_WITH_CODE_TLB
14101 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14102 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14103 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14104 pVCpu->iem.s.offCurInstrStart = 0;
14105 pVCpu->iem.s.offInstrNextByte = 0;
14106#else
14107 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14108 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14109#endif
14110 rcStrict = VINF_SUCCESS;
14111 }
14112 else
14113 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14114 if (rcStrict == VINF_SUCCESS)
14115 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14116 else if (pVCpu->iem.s.cActiveMappings > 0)
14117 iemMemRollback(pVCpu);
14118
14119#ifdef IN_RC
14120 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14121#endif
14122 return rcStrict;
14123}
14124
14125
14126/**
14127 * For debugging DISGetParamSize, may come in handy.
14128 *
14129 * @returns Strict VBox status code.
14130 * @param pVCpu The cross context virtual CPU structure of the
14131 * calling EMT.
14132 * @param pCtxCore The context core structure.
14133 * @param OpcodeBytesPC The PC of the opcode bytes.
14134 * @param   pvOpcodeBytes    Prefetched opcode bytes.
14135 * @param cbOpcodeBytes Number of prefetched bytes.
14136 * @param pcbWritten Where to return the number of bytes written.
14137 * Optional.
14138 */
14139VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14140 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14141 uint32_t *pcbWritten)
14142{
14143 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14144
14145 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14146 VBOXSTRICTRC rcStrict;
14147 if ( cbOpcodeBytes
14148 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14149 {
14150 iemInitDecoder(pVCpu, true);
14151#ifdef IEM_WITH_CODE_TLB
14152 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14153 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14154 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14155 pVCpu->iem.s.offCurInstrStart = 0;
14156 pVCpu->iem.s.offInstrNextByte = 0;
14157#else
14158 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14159 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14160#endif
14161 rcStrict = VINF_SUCCESS;
14162 }
14163 else
14164 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14165 if (rcStrict == VINF_SUCCESS)
14166 {
14167 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14168 if (pcbWritten)
14169 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14170 }
14171 else if (pVCpu->iem.s.cActiveMappings > 0)
14172 iemMemRollback(pVCpu);
14173
14174#ifdef IN_RC
14175 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14176#endif
14177 return rcStrict;
14178}
14179
14180
14181VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14182{
14183 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14184
14185 /*
14186     * See if there is an interrupt pending in TRPM and inject it if we can.
14187 */
14188 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14189#if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
14190 bool fIntrEnabled = pVCpu->cpum.GstCtx.hwvirt.fGif;
14191 if (fIntrEnabled)
14192 {
14193 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
14194 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, IEM_GET_CTX(pVCpu));
14195 else
14196 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14197 }
14198#else
14199 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14200#endif
14201 if ( fIntrEnabled
14202 && TRPMHasTrap(pVCpu)
14203 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14204 {
14205 uint8_t u8TrapNo;
14206 TRPMEVENT enmType;
14207 RTGCUINT uErrCode;
14208 RTGCPTR uCr2;
14209 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14210 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14211 TRPMResetTrap(pVCpu);
14212 }
14213
14214 /*
14215     * Initial decoder init w/ prefetch, then set up setjmp.
14216 */
14217 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14218 if (rcStrict == VINF_SUCCESS)
14219 {
14220#ifdef IEM_WITH_SETJMP
14221 jmp_buf JmpBuf;
14222 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14223 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14224 pVCpu->iem.s.cActiveMappings = 0;
14225 if ((rcStrict = setjmp(JmpBuf)) == 0)
14226#endif
14227 {
14228 /*
14229 * The run loop. We limit ourselves to 4096 instructions right now.
14230 */
14231 PVM pVM = pVCpu->CTX_SUFF(pVM);
14232 uint32_t cInstr = 4096;
14233 for (;;)
14234 {
14235 /*
14236 * Log the state.
14237 */
14238#ifdef LOG_ENABLED
14239 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14240#endif
14241
14242 /*
14243 * Do the decoding and emulation.
14244 */
14245 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14246 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14247 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14248 {
14249 Assert(pVCpu->iem.s.cActiveMappings == 0);
14250 pVCpu->iem.s.cInstructions++;
14251 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14252 {
14253 uint32_t fCpu = pVCpu->fLocalForcedActions
14254 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14255 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14256 | VMCPU_FF_TLB_FLUSH
14257#ifdef VBOX_WITH_RAW_MODE
14258 | VMCPU_FF_TRPM_SYNC_IDT
14259 | VMCPU_FF_SELM_SYNC_TSS
14260 | VMCPU_FF_SELM_SYNC_GDT
14261 | VMCPU_FF_SELM_SYNC_LDT
14262#endif
14263 | VMCPU_FF_INHIBIT_INTERRUPTS
14264 | VMCPU_FF_BLOCK_NMIS
14265 | VMCPU_FF_UNHALT ));
14266
14267 if (RT_LIKELY( ( !fCpu
14268 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14269 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14270 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14271 {
14272 if (cInstr-- > 0)
14273 {
14274 Assert(pVCpu->iem.s.cActiveMappings == 0);
14275 iemReInitDecoder(pVCpu);
14276 continue;
14277 }
14278 }
14279 }
14280 Assert(pVCpu->iem.s.cActiveMappings == 0);
14281 }
14282 else if (pVCpu->iem.s.cActiveMappings > 0)
14283 iemMemRollback(pVCpu);
14284 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14285 break;
14286 }
14287 }
14288#ifdef IEM_WITH_SETJMP
14289 else
14290 {
14291 if (pVCpu->iem.s.cActiveMappings > 0)
14292 iemMemRollback(pVCpu);
14293 pVCpu->iem.s.cLongJumps++;
14294 }
14295 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14296#endif
14297
14298 /*
14299 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14300 */
14301 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14302 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14303 }
14304 else
14305 {
14306 if (pVCpu->iem.s.cActiveMappings > 0)
14307 iemMemRollback(pVCpu);
14308
14309#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14310 /*
14311             * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14312             * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14313 */
14314 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14315#endif
14316 }
14317
14318 /*
14319 * Maybe re-enter raw-mode and log.
14320 */
14321#ifdef IN_RC
14322 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14323#endif
14324 if (rcStrict != VINF_SUCCESS)
14325 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14326 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14327 if (pcInstructions)
14328 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14329 return rcStrict;
14330}
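/*
 * Illustrative note (not part of the original source): a minimal sketch of
 * driving IEMExecLots from an outer execution loop; pcInstructions may be NULL
 * and is only used here for logging.
 *
 * @code
 *      uint32_t     cInstructions = 0;
 *      VBOXSTRICTRC rcStrict      = IEMExecLots(pVCpu, &cInstructions);
 *      LogFlow(("Executed %u instructions, rcStrict=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
 * @endcode
 */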
14331
14332
14333/**
14334 * Interface used by EMExecuteExec, does exit statistics and limits.
14335 *
14336 * @returns Strict VBox status code.
14337 * @param pVCpu The cross context virtual CPU structure.
14338 * @param fWillExit To be defined.
14339 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14340 * @param cMaxInstructions Maximum number of instructions to execute.
14341 * @param cMaxInstructionsWithoutExits
14342 * The max number of instructions without exits.
14343 * @param pStats Where to return statistics.
14344 */
14345VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14346 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14347{
14348 NOREF(fWillExit); /** @todo define flexible exit crits */
14349
14350 /*
14351 * Initialize return stats.
14352 */
14353 pStats->cInstructions = 0;
14354 pStats->cExits = 0;
14355 pStats->cMaxExitDistance = 0;
14356 pStats->cReserved = 0;
14357
14358 /*
14359     * Initial decoder init w/ prefetch, then set up setjmp.
14360 */
14361 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14362 if (rcStrict == VINF_SUCCESS)
14363 {
14364#ifdef IEM_WITH_SETJMP
14365 jmp_buf JmpBuf;
14366 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14367 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14368 pVCpu->iem.s.cActiveMappings = 0;
14369 if ((rcStrict = setjmp(JmpBuf)) == 0)
14370#endif
14371 {
14372#ifdef IN_RING0
14373 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14374#endif
14375 uint32_t cInstructionSinceLastExit = 0;
14376
14377 /*
14378 * The run loop. We limit ourselves to 4096 instructions right now.
14379 */
14380 PVM pVM = pVCpu->CTX_SUFF(pVM);
14381 for (;;)
14382 {
14383 /*
14384 * Log the state.
14385 */
14386#ifdef LOG_ENABLED
14387 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14388#endif
14389
14390 /*
14391 * Do the decoding and emulation.
14392 */
14393 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14394
14395 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14396 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14397
14398 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14399 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14400 {
14401 pStats->cExits += 1;
14402 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14403 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14404 cInstructionSinceLastExit = 0;
14405 }
14406
14407 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14408 {
14409 Assert(pVCpu->iem.s.cActiveMappings == 0);
14410 pVCpu->iem.s.cInstructions++;
14411 pStats->cInstructions++;
14412 cInstructionSinceLastExit++;
14413 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14414 {
14415 uint32_t fCpu = pVCpu->fLocalForcedActions
14416 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14417 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14418 | VMCPU_FF_TLB_FLUSH
14419#ifdef VBOX_WITH_RAW_MODE
14420 | VMCPU_FF_TRPM_SYNC_IDT
14421 | VMCPU_FF_SELM_SYNC_TSS
14422 | VMCPU_FF_SELM_SYNC_GDT
14423 | VMCPU_FF_SELM_SYNC_LDT
14424#endif
14425 | VMCPU_FF_INHIBIT_INTERRUPTS
14426 | VMCPU_FF_BLOCK_NMIS
14427 | VMCPU_FF_UNHALT ));
14428
14429 if (RT_LIKELY( ( ( !fCpu
14430 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14431 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14432 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) )
14433 || pStats->cInstructions < cMinInstructions))
14434 {
14435 if (pStats->cInstructions < cMaxInstructions)
14436 {
14437 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14438 {
14439#ifdef IN_RING0
14440 if ( !fCheckPreemptionPending
14441 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14442#endif
14443 {
14444 Assert(pVCpu->iem.s.cActiveMappings == 0);
14445 iemReInitDecoder(pVCpu);
14446 continue;
14447 }
14448#ifdef IN_RING0
14449 rcStrict = VINF_EM_RAW_INTERRUPT;
14450 break;
14451#endif
14452 }
14453 }
14454 }
14455 Assert(!(fCpu & VMCPU_FF_IEM));
14456 }
14457 Assert(pVCpu->iem.s.cActiveMappings == 0);
14458 }
14459 else if (pVCpu->iem.s.cActiveMappings > 0)
14460 iemMemRollback(pVCpu);
14461 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14462 break;
14463 }
14464 }
14465#ifdef IEM_WITH_SETJMP
14466 else
14467 {
14468 if (pVCpu->iem.s.cActiveMappings > 0)
14469 iemMemRollback(pVCpu);
14470 pVCpu->iem.s.cLongJumps++;
14471 }
14472 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14473#endif
14474
14475 /*
14476 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14477 */
14478 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14479 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14480 }
14481 else
14482 {
14483 if (pVCpu->iem.s.cActiveMappings > 0)
14484 iemMemRollback(pVCpu);
14485
14486#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14487 /*
14488             * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14489             * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14490 */
14491 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14492#endif
14493 }
14494
14495 /*
14496 * Maybe re-enter raw-mode and log.
14497 */
14498#ifdef IN_RC
14499 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14500#endif
14501 if (rcStrict != VINF_SUCCESS)
14502 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14503 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14504 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14505 return rcStrict;
14506}
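/*
 * Illustrative note (not part of the original source): a hedged usage sketch
 * for the exit-limited API above; the limits are invented for the example and
 * the struct type is inferred from the PIEMEXECFOREXITSTATS parameter.
 *
 * @code
 *      IEMEXECFOREXITSTATS Stats;
 *      // fWillExit=0, cMinInstructions=1, cMaxInstructions=4096, cMaxInstructionsWithoutExits=32
 *      VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0, 1, 4096, 32, &Stats);
 *      LogFlow(("ins=%u exits=%u maxdist=%u\n", Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
 * @endcode
 */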
14507
14508
14509/**
14510 * Injects a trap, fault, abort, software interrupt or external interrupt.
14511 *
14512 * The parameter list matches TRPMQueryTrapAll pretty closely.
14513 *
14514 * @returns Strict VBox status code.
14515 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14516 * @param u8TrapNo The trap number.
14517 * @param enmType What type is it (trap/fault/abort), software
14518 * interrupt or hardware interrupt.
14519 * @param uErrCode The error code if applicable.
14520 * @param uCr2 The CR2 value if applicable.
14521 * @param cbInstr The instruction length (only relevant for
14522 * software interrupts).
14523 */
14524VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14525 uint8_t cbInstr)
14526{
14527 iemInitDecoder(pVCpu, false);
14528#ifdef DBGFTRACE_ENABLED
14529 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14530 u8TrapNo, enmType, uErrCode, uCr2);
14531#endif
14532
14533 uint32_t fFlags;
14534 switch (enmType)
14535 {
14536 case TRPM_HARDWARE_INT:
14537 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14538 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14539 uErrCode = uCr2 = 0;
14540 break;
14541
14542 case TRPM_SOFTWARE_INT:
14543 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14544 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14545 uErrCode = uCr2 = 0;
14546 break;
14547
14548 case TRPM_TRAP:
14549 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14550 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14551 if (u8TrapNo == X86_XCPT_PF)
14552 fFlags |= IEM_XCPT_FLAGS_CR2;
14553 switch (u8TrapNo)
14554 {
14555 case X86_XCPT_DF:
14556 case X86_XCPT_TS:
14557 case X86_XCPT_NP:
14558 case X86_XCPT_SS:
14559 case X86_XCPT_PF:
14560 case X86_XCPT_AC:
14561 fFlags |= IEM_XCPT_FLAGS_ERR;
14562 break;
14563
14564 case X86_XCPT_NMI:
14565 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14566 break;
14567 }
14568 break;
14569
14570 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14571 }
14572
14573 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14574
14575 if (pVCpu->iem.s.cActiveMappings > 0)
14576 iemMemRollback(pVCpu);
14577
14578 return rcStrict;
14579}
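/*
 * Illustrative note (not part of the original source): injecting a guest page
 * fault through the API above; uPfErrCode and GCPtrFault are placeholders for
 * the page-fault error code and the faulting address.
 *
 * @code
 *      // cbInstr is 0 since it only matters for software interrupts.
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uPfErrCode, GCPtrFault, 0);
 * @endcode
 */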
14580
14581
14582/**
14583 * Injects the active TRPM event.
14584 *
14585 * @returns Strict VBox status code.
14586 * @param pVCpu The cross context virtual CPU structure.
14587 */
14588VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14589{
14590#ifndef IEM_IMPLEMENTS_TASKSWITCH
14591 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14592#else
14593 uint8_t u8TrapNo;
14594 TRPMEVENT enmType;
14595 RTGCUINT uErrCode;
14596 RTGCUINTPTR uCr2;
14597 uint8_t cbInstr;
14598 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14599 if (RT_FAILURE(rc))
14600 return rc;
14601
14602 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14603# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14604 if (rcStrict == VINF_SVM_VMEXIT)
14605 rcStrict = VINF_SUCCESS;
14606# endif
14607
14608 /** @todo Are there any other codes that imply the event was successfully
14609 * delivered to the guest? See @bugref{6607}. */
14610 if ( rcStrict == VINF_SUCCESS
14611 || rcStrict == VINF_IEM_RAISED_XCPT)
14612 TRPMResetTrap(pVCpu);
14613
14614 return rcStrict;
14615#endif
14616}
14617
14618
14619VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14620{
14621 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14622 return VERR_NOT_IMPLEMENTED;
14623}
14624
14625
14626VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14627{
14628 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14629 return VERR_NOT_IMPLEMENTED;
14630}
14631
14632
14633#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14634/**
14635 * Executes an IRET instruction with default operand size.
14636 *
14637 * This is for PATM.
14638 *
14639 * @returns VBox status code.
14640 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14641 * @param pCtxCore The register frame.
14642 */
14643VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14644{
14645 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14646
14647 iemCtxCoreToCtx(pCtx, pCtxCore);
14648 iemInitDecoder(pVCpu);
14649 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14650 if (rcStrict == VINF_SUCCESS)
14651 iemCtxToCtxCore(pCtxCore, pCtx);
14652 else
14653 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14654 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14655 return rcStrict;
14656}
14657#endif
14658
14659
14660/**
14661 * Macro used by the IEMExec* methods to check the given instruction length.
14662 *
14663 * Will return on failure!
14664 *
14665 * @param a_cbInstr The given instruction length.
14666 * @param a_cbMin The minimum length.
14667 */
14668#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14669 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14670 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
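/*
 * Illustrative note (not part of the original source): the single unsigned
 * comparison above implements the range check a_cbMin <= a_cbInstr <= 15;
 * anything below a_cbMin wraps around to a huge unsigned value and fails.
 * E.g. with a_cbMin=1: a_cbInstr=0 -> 0-1 wraps, fails; a_cbInstr=15 -> 14 <= 14,
 * passes; a_cbInstr=16 -> 15 > 14, fails.
 */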
14671
14672
14673/**
14674 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14675 *
14676 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14677 *
14678 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14679 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14680 * @param rcStrict The status code to fiddle.
14681 */
14682DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14683{
14684 iemUninitExec(pVCpu);
14685#ifdef IN_RC
14686 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14687#else
14688 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14689#endif
14690}
14691
14692
14693/**
14694 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14695 *
14696 * This API ASSUMES that the caller has already verified that the guest code is
14697 * allowed to access the I/O port. (The I/O port is in the DX register in the
14698 * guest state.)
14699 *
14700 * @returns Strict VBox status code.
14701 * @param pVCpu The cross context virtual CPU structure.
14702 * @param cbValue The size of the I/O port access (1, 2, or 4).
14703 * @param enmAddrMode The addressing mode.
14704 * @param fRepPrefix Indicates whether a repeat prefix is used
14705 * (doesn't matter which for this instruction).
14706 * @param cbInstr The instruction length in bytes.
14707 * @param   iEffSeg         The effective segment register number.
14708 * @param fIoChecked Whether the access to the I/O port has been
14709 * checked or not. It's typically checked in the
14710 * HM scenario.
14711 */
14712VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14713 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14714{
14715 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14716 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14717
14718 /*
14719 * State init.
14720 */
14721 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14722
14723 /*
14724 * Switch orgy for getting to the right handler.
14725 */
14726 VBOXSTRICTRC rcStrict;
14727 if (fRepPrefix)
14728 {
14729 switch (enmAddrMode)
14730 {
14731 case IEMMODE_16BIT:
14732 switch (cbValue)
14733 {
14734 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14735 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14736 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14737 default:
14738 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14739 }
14740 break;
14741
14742 case IEMMODE_32BIT:
14743 switch (cbValue)
14744 {
14745 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14746 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14747 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14748 default:
14749 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14750 }
14751 break;
14752
14753 case IEMMODE_64BIT:
14754 switch (cbValue)
14755 {
14756 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14757 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14758 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14759 default:
14760 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14761 }
14762 break;
14763
14764 default:
14765 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14766 }
14767 }
14768 else
14769 {
14770 switch (enmAddrMode)
14771 {
14772 case IEMMODE_16BIT:
14773 switch (cbValue)
14774 {
14775 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14776 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14777 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14778 default:
14779 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14780 }
14781 break;
14782
14783 case IEMMODE_32BIT:
14784 switch (cbValue)
14785 {
14786 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14787 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14788 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14789 default:
14790 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14791 }
14792 break;
14793
14794 case IEMMODE_64BIT:
14795 switch (cbValue)
14796 {
14797 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14798 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14799 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14800 default:
14801 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14802 }
14803 break;
14804
14805 default:
14806 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14807 }
14808 }
14809
14810 if (pVCpu->iem.s.cActiveMappings)
14811 iemMemRollback(pVCpu);
14812
14813 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14814}
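
/*
 * Usage sketch (illustrative only, not part of the original source): a caller
 * in HM that has decoded a string I/O write (OUTS) exit could hand it to IEM
 * roughly as below.  The names cbAccess, enmGuestAddrMode, fRep, cbExitInstr
 * and iSegReg are hypothetical placeholders for values the caller extracts
 * from its own exit information; the final 'true' indicates that the I/O port
 * access has already been checked, as this API assumes.
 *
 *   VBOXSTRICTRC rcStrict2 = IEMExecStringIoWrite(pVCpu, cbAccess, enmGuestAddrMode,
 *                                                 fRep, cbExitInstr, iSegReg,
 *                                                 true);
 *
 * The returned strict status code is then fed into the caller's normal status
 * handling.
 */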
14815
14816
14817/**
14818 * Interface for HM and EM for executing string I/O IN (read) instructions.
14819 *
14820 * This API ASSUMES that the caller has already verified that the guest code is
14821 * allowed to access the I/O port. (The I/O port is in the DX register in the
14822 * guest state.)
14823 *
14824 * @returns Strict VBox status code.
14825 * @param pVCpu The cross context virtual CPU structure.
14826 * @param cbValue The size of the I/O port access (1, 2, or 4).
14827 * @param enmAddrMode The addressing mode.
14828 * @param fRepPrefix Indicates whether a repeat prefix is used
14829 * (doesn't matter which for this instruction).
14830 * @param cbInstr The instruction length in bytes.
14831 * @param fIoChecked Whether the access to the I/O port has been
14832 * checked or not. It's typically checked in the
14833 * HM scenario.
14834 */
14835VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14836 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14837{
14838 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14839
14840 /*
14841 * State init.
14842 */
14843 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14844
14845 /*
14846 * Switch orgy for getting to the right handler.
14847 */
14848 VBOXSTRICTRC rcStrict;
14849 if (fRepPrefix)
14850 {
14851 switch (enmAddrMode)
14852 {
14853 case IEMMODE_16BIT:
14854 switch (cbValue)
14855 {
14856 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14857 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14858 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14859 default:
14860 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14861 }
14862 break;
14863
14864 case IEMMODE_32BIT:
14865 switch (cbValue)
14866 {
14867 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14868 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14869 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14870 default:
14871 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14872 }
14873 break;
14874
14875 case IEMMODE_64BIT:
14876 switch (cbValue)
14877 {
14878 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14879 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14880 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14881 default:
14882 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14883 }
14884 break;
14885
14886 default:
14887 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14888 }
14889 }
14890 else
14891 {
14892 switch (enmAddrMode)
14893 {
14894 case IEMMODE_16BIT:
14895 switch (cbValue)
14896 {
14897 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14898 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14899 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14900 default:
14901 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14902 }
14903 break;
14904
14905 case IEMMODE_32BIT:
14906 switch (cbValue)
14907 {
14908 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14909 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14910 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14911 default:
14912 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14913 }
14914 break;
14915
14916 case IEMMODE_64BIT:
14917 switch (cbValue)
14918 {
14919 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14920 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14921 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14922 default:
14923 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14924 }
14925 break;
14926
14927 default:
14928 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14929 }
14930 }
14931
14932 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
14933 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14934}
14935
14936
14937/**
14938 * Interface for rawmode to execute an OUT instruction.
14939 *
14940 * @returns Strict VBox status code.
14941 * @param pVCpu The cross context virtual CPU structure.
14942 * @param cbInstr The instruction length in bytes.
14943 * @param u16Port The port to write to.
14944 * @param cbReg The register size.
14945 *
14946 * @remarks In ring-0 not all of the state needs to be synced in.
14947 */
14948VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14949{
14950 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14951 Assert(cbReg <= 4 && cbReg != 3);
14952
14953 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14954 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14955 Assert(!pVCpu->iem.s.cActiveMappings);
14956 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14957}
14958
14959
14960/**
14961 * Interface for rawmode to execute an IN instruction.
14962 *
14963 * @returns Strict VBox status code.
14964 * @param pVCpu The cross context virtual CPU structure.
14965 * @param cbInstr The instruction length in bytes.
14966 * @param u16Port The port to read.
14967 * @param cbReg The register size.
14968 */
14969VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14970{
14971 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14972 Assert(cbReg <= 4 && cbReg != 3);
14973
14974 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14975 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14976 Assert(!pVCpu->iem.s.cActiveMappings);
14977 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14978}
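
/*
 * Usage sketch (illustrative only, not part of the original source): after
 * decoding a port I/O intercept, a raw-mode (or similar) caller could hand
 * the instruction to IEM as shown.  fWrite, uPort and cbAccess are
 * hypothetical names for the decoded direction, port number and access width
 * (1, 2 or 4 bytes); cbInstr is the decoded instruction length.
 *
 *   VBOXSTRICTRC rcStrict2 = fWrite
 *                          ? IEMExecDecodedOut(pVCpu, cbInstr, uPort, cbAccess)
 *                          : IEMExecDecodedIn( pVCpu, cbInstr, uPort, cbAccess);
 */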
14979
14980
14981/**
14982 * Interface for HM and EM to write to a CRx register.
14983 *
14984 * @returns Strict VBox status code.
14985 * @param pVCpu The cross context virtual CPU structure.
14986 * @param cbInstr The instruction length in bytes.
14987 * @param iCrReg The control register number (destination).
14988 * @param iGReg The general purpose register number (source).
14989 *
14990 * @remarks In ring-0 not all of the state needs to be synced in.
14991 */
14992VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14993{
14994 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14995 Assert(iCrReg < 16);
14996 Assert(iGReg < 16);
14997
14998 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14999 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15000 Assert(!pVCpu->iem.s.cActiveMappings);
15001 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15002}
15003
15004
15005/**
15006 * Interface for HM and EM to read from a CRx register.
15007 *
15008 * @returns Strict VBox status code.
15009 * @param pVCpu The cross context virtual CPU structure.
15010 * @param cbInstr The instruction length in bytes.
15011 * @param iGReg The general purpose register number (destination).
15012 * @param iCrReg The control register number (source).
15013 *
15014 * @remarks In ring-0 not all of the state needs to be synced in.
15015 */
15016VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15017{
15018 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15019 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15020 | CPUMCTX_EXTRN_APIC_TPR);
15021 Assert(iCrReg < 16);
15022 Assert(iGReg < 16);
15023
15024 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15025 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15026 Assert(!pVCpu->iem.s.cActiveMappings);
15027 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15028}
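
/*
 * Usage sketch (illustrative only, not part of the original source): a
 * mov-CRx intercept handler could dispatch to the two interfaces above based
 * on the decoded direction.  Note the swapped argument order between the
 * write (iCrReg, iGReg) and read (iGReg, iCrReg) variants.  fCrWrite, iCrReg
 * and iGReg are hypothetical names for values taken from the caller's exit
 * decoding.
 *
 *   VBOXSTRICTRC rcStrict2 = fCrWrite
 *                          ? IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg)
 *                          : IEMExecDecodedMovCRxRead( pVCpu, cbInstr, iGReg, iCrReg);
 */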
15029
15030
15031/**
15032 * Interface for HM and EM to clear the CR0[TS] bit.
15033 *
15034 * @returns Strict VBox status code.
15035 * @param pVCpu The cross context virtual CPU structure.
15036 * @param cbInstr The instruction length in bytes.
15037 *
15038 * @remarks In ring-0 not all of the state needs to be synced in.
15039 */
15040VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15041{
15042 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15043
15044 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15045 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15046 Assert(!pVCpu->iem.s.cActiveMappings);
15047 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15048}
15049
15050
15051/**
15052 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15053 *
15054 * @returns Strict VBox status code.
15055 * @param pVCpu The cross context virtual CPU structure.
15056 * @param cbInstr The instruction length in bytes.
15057 * @param uValue The value to load into CR0.
15058 *
15059 * @remarks In ring-0 not all of the state needs to be synced in.
15060 */
15061VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
15062{
15063 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15064
15065 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15066 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
15067 Assert(!pVCpu->iem.s.cActiveMappings);
15068 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15069}
15070
15071
15072/**
15073 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15074 *
15075 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15076 *
15077 * @returns Strict VBox status code.
15078 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15079 * @param cbInstr The instruction length in bytes.
15080 * @remarks In ring-0 not all of the state needs to be synced in.
15081 * @thread EMT(pVCpu)
15082 */
15083VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15084{
15085 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15086
15087 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15088 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15089 Assert(!pVCpu->iem.s.cActiveMappings);
15090 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15091}
15092
15093
15094/**
15095 * Interface for HM and EM to emulate the WBINVD instruction.
15096 *
15097 * @returns Strict VBox status code.
15098 * @param pVCpu The cross context virtual CPU structure.
15099 * @param cbInstr The instruction length in bytes.
15100 *
15101 * @remarks In ring-0 not all of the state needs to be synced in.
15102 */
15103VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15104{
15105 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15106
15107 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15108 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15109 Assert(!pVCpu->iem.s.cActiveMappings);
15110 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15111}
15112
15113
15114/**
15115 * Interface for HM and EM to emulate the INVD instruction.
15116 *
15117 * @returns Strict VBox status code.
15118 * @param pVCpu The cross context virtual CPU structure.
15119 * @param cbInstr The instruction length in bytes.
15120 *
15121 * @remarks In ring-0 not all of the state needs to be synced in.
15122 */
15123VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15124{
15125 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15126
15127 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15128 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15129 Assert(!pVCpu->iem.s.cActiveMappings);
15130 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15131}
15132
15133
15134/**
15135 * Interface for HM and EM to emulate the INVLPG instruction.
15136 *
15137 * @returns Strict VBox status code.
15138 * @retval VINF_PGM_SYNC_CR3
15139 *
15140 * @param pVCpu The cross context virtual CPU structure.
15141 * @param cbInstr The instruction length in bytes.
15142 * @param GCPtrPage The effective address of the page to invalidate.
15143 *
15144 * @remarks In ring-0 not all of the state needs to be synced in.
15145 */
15146VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15147{
15148 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15149
15150 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15151 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15152 Assert(!pVCpu->iem.s.cActiveMappings);
15153 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15154}
15155
15156
15157/**
15158 * Interface for HM and EM to emulate the CPUID instruction.
15159 *
15160 * @returns Strict VBox status code.
15161 *
15162 * @param pVCpu The cross context virtual CPU structure.
15163 * @param cbInstr The instruction length in bytes.
15164 *
15165 * @remarks Not all of the state needs to be synced in; the usual state plus RAX and RCX.
15166 */
15167VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15168{
15169 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15170 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15171
15172 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15173 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15174 Assert(!pVCpu->iem.s.cActiveMappings);
15175 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15176}
15177
15178
15179/**
15180 * Interface for HM and EM to emulate the RDPMC instruction.
15181 *
15182 * @returns Strict VBox status code.
15183 *
15184 * @param pVCpu The cross context virtual CPU structure.
15185 * @param cbInstr The instruction length in bytes.
15186 *
15187 * @remarks Not all of the state needs to be synced in.
15188 */
15189VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15190{
15191 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15192 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15193
15194 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15195 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15196 Assert(!pVCpu->iem.s.cActiveMappings);
15197 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15198}
15199
15200
15201/**
15202 * Interface for HM and EM to emulate the RDTSC instruction.
15203 *
15204 * @returns Strict VBox status code.
15205 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15206 *
15207 * @param pVCpu The cross context virtual CPU structure.
15208 * @param cbInstr The instruction length in bytes.
15209 *
15210 * @remarks Not all of the state needs to be synced in.
15211 */
15212VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15213{
15214 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15215 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15216
15217 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15218 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15219 Assert(!pVCpu->iem.s.cActiveMappings);
15220 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15221}
15222
15223
15224/**
15225 * Interface for HM and EM to emulate the RDTSCP instruction.
15226 *
15227 * @returns Strict VBox status code.
15228 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15229 *
15230 * @param pVCpu The cross context virtual CPU structure.
15231 * @param cbInstr The instruction length in bytes.
15232 *
15233 * @remarks Not all of the state needs to be synced in. Recommended
15234 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15235 */
15236VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15237{
15238 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15239 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15240
15241 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15242 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15243 Assert(!pVCpu->iem.s.cActiveMappings);
15244 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15245}
15246
15247
15248/**
15249 * Interface for HM and EM to emulate the RDMSR instruction.
15250 *
15251 * @returns Strict VBox status code.
15252 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15253 *
15254 * @param pVCpu The cross context virtual CPU structure.
15255 * @param cbInstr The instruction length in bytes.
15256 *
15257 * @remarks Not all of the state needs to be synced in. Requires RCX and
15258 * (currently) all MSRs.
15259 */
15260VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15261{
15262 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15263 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15264
15265 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15266 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15267 Assert(!pVCpu->iem.s.cActiveMappings);
15268 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15269}
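
/*
 * Usage sketch (illustrative only, not part of the original source): typical
 * handling of the status returned by these decoded-instruction interfaces,
 * shown here for RDMSR.  VINF_IEM_RAISED_XCPT indicates IEM has already raised
 * the exception for the guest, so a caller can usually treat it as success
 * (after marking the relevant guest state as modified) and resume; other
 * codes are passed on as usual.
 *
 *   VBOXSTRICTRC rcStrict2 = IEMExecDecodedRdmsr(pVCpu, cbInstr);
 *   if (rcStrict2 == VINF_IEM_RAISED_XCPT)
 *       rcStrict2 = VINF_SUCCESS;
 */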
15270
15271
15272/**
15273 * Interface for HM and EM to emulate the WRMSR instruction.
15274 *
15275 * @returns Strict VBox status code.
15276 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15277 *
15278 * @param pVCpu The cross context virtual CPU structure.
15279 * @param cbInstr The instruction length in bytes.
15280 *
15281 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15282 * and (currently) all MSRs.
15283 */
15284VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15285{
15286 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15287 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15288 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15289
15290 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15291 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15292 Assert(!pVCpu->iem.s.cActiveMappings);
15293 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15294}
15295
15296
15297/**
15298 * Interface for HM and EM to emulate the MONITOR instruction.
15299 *
15300 * @returns Strict VBox status code.
15301 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15302 *
15303 * @param pVCpu The cross context virtual CPU structure.
15304 * @param cbInstr The instruction length in bytes.
15305 *
15306 * @remarks Not all of the state needs to be synced in.
15307 * @remarks ASSUMES the default DS segment and that no segment override
15308 *          prefixes are used.
15309 */
15310VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15311{
15312 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15313 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15314
15315 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15316 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15317 Assert(!pVCpu->iem.s.cActiveMappings);
15318 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15319}
15320
15321
15322/**
15323 * Interface for HM and EM to emulate the MWAIT instruction.
15324 *
15325 * @returns Strict VBox status code.
15326 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15327 *
15328 * @param pVCpu The cross context virtual CPU structure.
15329 * @param cbInstr The instruction length in bytes.
15330 *
15331 * @remarks Not all of the state needs to be synced in.
15332 */
15333VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15334{
15335 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15336
15337 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15338 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15339 Assert(!pVCpu->iem.s.cActiveMappings);
15340 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15341}
15342
15343
15344/**
15345 * Interface for HM and EM to emulate the HLT instruction.
15346 *
15347 * @returns Strict VBox status code.
15348 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15349 *
15350 * @param pVCpu The cross context virtual CPU structure.
15351 * @param cbInstr The instruction length in bytes.
15352 *
15353 * @remarks Not all of the state needs to be synced in.
15354 */
15355VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15356{
15357 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15358
15359 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15360 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15361 Assert(!pVCpu->iem.s.cActiveMappings);
15362 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15363}
15364
15365
15366/**
15367 * Checks if IEM is in the process of delivering an event (interrupt or
15368 * exception).
15369 *
15370 * @returns true if we're in the process of raising an interrupt or exception,
15371 * false otherwise.
15372 * @param pVCpu The cross context virtual CPU structure.
15373 * @param puVector Where to store the vector associated with the
15374 * currently delivered event, optional.
15375 * @param pfFlags Where to store the event delivery flags (see
15376 * IEM_XCPT_FLAGS_XXX), optional.
15377 * @param puErr Where to store the error code associated with the
15378 * event, optional.
15379 * @param puCr2 Where to store the CR2 associated with the event,
15380 * optional.
15381 * @remarks The caller should check the flags to determine if the error code and
15382 * CR2 are valid for the event.
15383 */
15384VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15385{
15386 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15387 if (fRaisingXcpt)
15388 {
15389 if (puVector)
15390 *puVector = pVCpu->iem.s.uCurXcpt;
15391 if (pfFlags)
15392 *pfFlags = pVCpu->iem.s.fCurXcpt;
15393 if (puErr)
15394 *puErr = pVCpu->iem.s.uCurXcptErr;
15395 if (puCr2)
15396 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15397 }
15398 return fRaisingXcpt;
15399}
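
/*
 * Usage sketch (illustrative only, not part of the original source): a caller
 * that needs to know whether IEM is mid-way through delivering an event can
 * query it like this, checking the IEM_XCPT_FLAGS_XXX flags before trusting
 * the error code or CR2 values, as the remark above advises.
 *
 *   uint8_t  uVector;
 *   uint32_t fFlags;
 *   uint32_t uErrCode;
 *   uint64_t uCr2;
 *   if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErrCode, &uCr2))
 *       Log(("IEM is delivering vector %#x (flags=%#x)\n", uVector, fFlags));
 */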
15400
15401#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15402
15403/**
15404 * Interface for HM and EM to emulate the CLGI instruction.
15405 *
15406 * @returns Strict VBox status code.
15407 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15408 * @param cbInstr The instruction length in bytes.
15409 * @thread EMT(pVCpu)
15410 */
15411VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15412{
15413 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15414
15415 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15416 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15417 Assert(!pVCpu->iem.s.cActiveMappings);
15418 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15419}
15420
15421
15422/**
15423 * Interface for HM and EM to emulate the STGI instruction.
15424 *
15425 * @returns Strict VBox status code.
15426 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15427 * @param cbInstr The instruction length in bytes.
15428 * @thread EMT(pVCpu)
15429 */
15430VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15431{
15432 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15433
15434 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15435 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15436 Assert(!pVCpu->iem.s.cActiveMappings);
15437 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15438}
15439
15440
15441/**
15442 * Interface for HM and EM to emulate the VMLOAD instruction.
15443 *
15444 * @returns Strict VBox status code.
15445 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15446 * @param cbInstr The instruction length in bytes.
15447 * @thread EMT(pVCpu)
15448 */
15449VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15450{
15451 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15452
15453 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15454 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15455 Assert(!pVCpu->iem.s.cActiveMappings);
15456 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15457}
15458
15459
15460/**
15461 * Interface for HM and EM to emulate the VMSAVE instruction.
15462 *
15463 * @returns Strict VBox status code.
15464 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15465 * @param cbInstr The instruction length in bytes.
15466 * @thread EMT(pVCpu)
15467 */
15468VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15469{
15470 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15471
15472 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15473 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15474 Assert(!pVCpu->iem.s.cActiveMappings);
15475 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15476}
15477
15478
15479/**
15480 * Interface for HM and EM to emulate the INVLPGA instruction.
15481 *
15482 * @returns Strict VBox status code.
15483 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15484 * @param cbInstr The instruction length in bytes.
15485 * @thread EMT(pVCpu)
15486 */
15487VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15488{
15489 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15490
15491 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15492 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15493 Assert(!pVCpu->iem.s.cActiveMappings);
15494 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15495}
15496
15497
15498/**
15499 * Interface for HM and EM to emulate the VMRUN instruction.
15500 *
15501 * @returns Strict VBox status code.
15502 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15503 * @param cbInstr The instruction length in bytes.
15504 * @thread EMT(pVCpu)
15505 */
15506VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15507{
15508 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15509 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15510
15511 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15512 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15513 Assert(!pVCpu->iem.s.cActiveMappings);
15514 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15515}
15516
15517
15518/**
15519 * Interface for HM and EM to emulate \#VMEXIT.
15520 *
15521 * @returns Strict VBox status code.
15522 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15523 * @param uExitCode The exit code.
15524 * @param uExitInfo1 The exit information field 1 (EXITINFO1).
15525 * @param uExitInfo2 The exit information field 2 (EXITINFO2).
15526 * @thread EMT(pVCpu)
15527 */
15528VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15529{
15530 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15531 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15532 if (pVCpu->iem.s.cActiveMappings)
15533 iemMemRollback(pVCpu);
15534 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15535}
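
/*
 * Usage sketch (illustrative only, not part of the original source): when HM
 * determines that an intercepted event must cause a nested-guest #VMEXIT, it
 * can trigger the world switch back to the guest hypervisor through this
 * interface.  uExitCode, uExitInfo1 and uExitInfo2 are hypothetical locals
 * holding the SVM exit code and the two exit information fields for the
 * intercept.
 *
 *   VBOXSTRICTRC rcStrict2 = IEMExecSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
 */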
15536
15537#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15538
15539#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15540
15541/**
15542 * Interface for HM and EM to emulate the VMREAD instruction.
15543 *
15544 * @returns Strict VBox status code.
15545 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15546 * @param pExitInfo Pointer to the VM-exit information struct.
15547 * @thread EMT(pVCpu)
15548 */
15549VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15550{
15551 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15552 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15553 Assert(pExitInfo);
15554
15555 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15556
15557 VBOXSTRICTRC rcStrict;
15558 uint8_t const cbInstr = pExitInfo->cbInstr;
15559 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15560 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15561 {
15562 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
15563 {
15564 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15565 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
15566 }
15567 else
15568 {
15569 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15570 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
15571 }
15572 }
15573 else
15574 {
15575 RTGCPTR GCPtrDst = pExitInfo->GCPtrEffAddr;
15576 uint8_t iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15577 IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15578 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
15579 }
15580 if (pVCpu->iem.s.cActiveMappings)
15581 iemMemRollback(pVCpu);
15582 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15583}
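
/*
 * Usage sketch (illustrative only, not part of the original source): a VMREAD
 * intercept handler in HM could fill in a VMXVEXITINFO from its own exit
 * records and let IEM do the rest.  Only a few of the consumed fields are
 * shown; a memory operand would also need iSegReg, u3AddrSize and
 * GCPtrEffAddr.  The right-hand side names are hypothetical placeholders.
 *
 *   VMXVEXITINFO ExitInfo;
 *   RT_ZERO(ExitInfo);
 *   ExitInfo.cbInstr                               = cbExitInstr;
 *   ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand = 1;
 *   ExitInfo.InstrInfo.VmreadVmwrite.iReg1         = iRegDst;
 *   ExitInfo.InstrInfo.VmreadVmwrite.iReg2         = iRegFieldEnc;
 *   VBOXSTRICTRC rcStrict2 = IEMExecDecodedVmread(pVCpu, &ExitInfo);
 */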
15584
15585
15586/**
15587 * Interface for HM and EM to emulate the VMWRITE instruction.
15588 *
15589 * @returns Strict VBox status code.
15590 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15591 * @param pExitInfo Pointer to the VM-exit information struct.
15592 * @thread EMT(pVCpu)
15593 */
15594VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15595{
15596 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15597 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15598 Assert(pExitInfo);
15599
15600 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15601
15602 uint64_t u64Val;
15603 uint8_t iEffSeg;
15604 IEMMODE enmEffAddrMode;
15605 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15606 {
15607 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15608 iEffSeg = UINT8_MAX;
15609 enmEffAddrMode = UINT8_MAX;
15610 }
15611 else
15612 {
15613 u64Val = pExitInfo->GCPtrEffAddr;
15614 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15615 enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15616 }
15617 uint8_t const cbInstr = pExitInfo->cbInstr;
15618 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15619 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
15620 if (pVCpu->iem.s.cActiveMappings)
15621 iemMemRollback(pVCpu);
15622 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15623}
15624
15625
15626/**
15627 * Interface for HM and EM to emulate the VMPTRLD instruction.
15628 *
15629 * @returns Strict VBox status code.
15630 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15631 * @param pExitInfo Pointer to the VM-exit information struct.
15632 * @thread EMT(pVCpu)
15633 */
15634VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15635{
15636 Assert(pExitInfo);
15637 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15638 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15639
15640 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15641
15642 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15643 uint8_t const cbInstr = pExitInfo->cbInstr;
15644 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15645 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15646 if (pVCpu->iem.s.cActiveMappings)
15647 iemMemRollback(pVCpu);
15648 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15649}
15650
15651
15652/**
15653 * Interface for HM and EM to emulate the VMPTRST instruction.
15654 *
15655 * @returns Strict VBox status code.
15656 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15657 * @param pExitInfo Pointer to the VM-exit information struct.
15658 * @thread EMT(pVCpu)
15659 */
15660VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15661{
15662 Assert(pExitInfo);
15663 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15664 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15665
15666 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15667
15668 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15669 uint8_t const cbInstr = pExitInfo->cbInstr;
15670 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15671 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15672 if (pVCpu->iem.s.cActiveMappings)
15673 iemMemRollback(pVCpu);
15674 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15675}
15676
15677
15678/**
15679 * Interface for HM and EM to emulate the VMCLEAR instruction.
15680 *
15681 * @returns Strict VBox status code.
15682 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15683 * @param pExitInfo Pointer to the VM-exit information struct.
15684 * @thread EMT(pVCpu)
15685 */
15686VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15687{
15688 Assert(pExitInfo);
15689 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15690 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15691
15692 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15693
15694 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15695 uint8_t const cbInstr = pExitInfo->cbInstr;
15696 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15697 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15698 if (pVCpu->iem.s.cActiveMappings)
15699 iemMemRollback(pVCpu);
15700 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15701}
15702
15703
15704/**
15705 * Interface for HM and EM to emulate the VMXON instruction.
15706 *
15707 * @returns Strict VBox status code.
15708 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15709 * @param pExitInfo Pointer to the VM-exit information struct.
15710 * @thread EMT(pVCpu)
15711 */
15712VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15713{
15714 Assert(pExitInfo);
15715 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15716 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15717
15718 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15719
15720 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15721 uint8_t const cbInstr = pExitInfo->cbInstr;
15722 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
15723 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
15724 if (pVCpu->iem.s.cActiveMappings)
15725 iemMemRollback(pVCpu);
15726 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15727}
15728
15729
15730/**
15731 * Interface for HM and EM to emulate the VMXOFF instruction.
15732 *
15733 * @returns Strict VBox status code.
15734 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15735 * @param cbInstr The instruction length in bytes.
15736 * @thread EMT(pVCpu)
15737 */
15738VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
15739{
15740 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15741
15742 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15743 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
15744 Assert(!pVCpu->iem.s.cActiveMappings);
15745 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15746}
15747
15748#endif
15749
15750#ifdef IN_RING3
15751
15752/**
15753 * Handles the unlikely and probably fatal merge cases.
15754 *
15755 * @returns Merged status code.
15756 * @param rcStrict Current EM status code.
15757 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15758 * with @a rcStrict.
15759 * @param iMemMap The memory mapping index. For error reporting only.
15760 * @param pVCpu The cross context virtual CPU structure of the calling
15761 * thread, for error reporting only.
15762 */
15763DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15764 unsigned iMemMap, PVMCPU pVCpu)
15765{
15766 if (RT_FAILURE_NP(rcStrict))
15767 return rcStrict;
15768
15769 if (RT_FAILURE_NP(rcStrictCommit))
15770 return rcStrictCommit;
15771
15772 if (rcStrict == rcStrictCommit)
15773 return rcStrictCommit;
15774
15775 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15776 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15777 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15778 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15779 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15780 return VERR_IOM_FF_STATUS_IPE;
15781}
15782
15783
15784/**
15785 * Helper for IOMR3ProcessForceFlag.
15786 *
15787 * @returns Merged status code.
15788 * @param rcStrict Current EM status code.
15789 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15790 * with @a rcStrict.
15791 * @param iMemMap The memory mapping index. For error reporting only.
15792 * @param pVCpu The cross context virtual CPU structure of the calling
15793 * thread, for error reporting only.
15794 */
15795DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15796{
15797 /* Simple. */
15798 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15799 return rcStrictCommit;
15800
15801 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15802 return rcStrict;
15803
15804 /* EM scheduling status codes. */
15805 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15806 && rcStrict <= VINF_EM_LAST))
15807 {
15808 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15809 && rcStrictCommit <= VINF_EM_LAST))
15810 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15811 }
15812
15813 /* Unlikely */
15814 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15815}
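
/*
 * Worked summary (added commentary, not from the original source): if the
 * current status is VINF_SUCCESS or VINF_EM_RAW_TO_R3, the commit status
 * simply wins; if the commit status is VINF_SUCCESS, the current status is
 * kept; if both are EM scheduling codes, the numerically lower (higher
 * priority) one is returned; anything else falls into iemR3MergeStatusSlow,
 * which only tolerates failure codes and exact matches.
 */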
15816
15817
15818/**
15819 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15820 *
15821 * @returns Merge between @a rcStrict and what the commit operation returned.
15822 * @param pVM The cross context VM structure.
15823 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15824 * @param rcStrict The status code returned by ring-0 or raw-mode.
15825 */
15826VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15827{
15828 /*
15829 * Reset the pending commit.
15830 */
15831 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15832 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15833 ("%#x %#x %#x\n",
15834 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15835 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15836
15837 /*
15838 * Commit the pending bounce buffers (usually just one).
15839 */
15840 unsigned cBufs = 0;
15841 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15842 while (iMemMap-- > 0)
15843 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15844 {
15845 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15846 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15847 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15848
15849 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15850 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15851 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15852
15853 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15854 {
15855 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15856 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15857 pbBuf,
15858 cbFirst,
15859 PGMACCESSORIGIN_IEM);
15860 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
15861 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
15862 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
15863 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
15864 }
15865
15866 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
15867 {
15868 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
15869 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
15870 pbBuf + cbFirst,
15871 cbSecond,
15872 PGMACCESSORIGIN_IEM);
15873 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
15874 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
15875 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
15876 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
15877 }
15878 cBufs++;
15879 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
15880 }
15881
15882 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
15883 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
15884 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15885 pVCpu->iem.s.cActiveMappings = 0;
15886 return rcStrict;
15887}
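
/*
 * Usage sketch (illustrative only, not part of the original source): the
 * ring-3 force-flag processing is expected to invoke this roughly as below
 * once it sees VMCPU_FF_IEM set after returning from ring-0 or raw-mode
 * execution:
 *
 *   if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *       rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */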
15888
15889#endif /* IN_RING3 */
15890