VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@71078

Last change on this file since 71078 was 71078, checked in by vboxsync, 7 years ago

VMM/IEM: Nested Hw.virt: Fix GDTR, IDTR read intercepts. Previously they were incorrectly combined as a GDTR read intercept.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 638.4 KB
1/* $Id: IEMAll.cpp 71078 2018-02-21 07:58:00Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
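/* Illustrative sketch added for exposition (not part of the original file):
 * how the logging levels described above are typically exercised from IEM
 * code.  The function is invented; only the Log* macros are real. */
#if 0
static void iemExampleLogUsage(PVMCPU pVCpu)
{
    LogFlow(("iemExampleLogUsage: enter, CPL=%u\n", pVCpu->iem.s.uCpl)); /* Flow: basic enter/exit info. */
    Log(("iemExampleLogUsage: raising #GP(0)\n"));                       /* Level 1: major events.       */
    Log4(("decode - example mnemonic\n"));                               /* Level 4: decoded mnemonics.  */
    Log8(("example memory write\n"));                                    /* Level 8: memory writes.      */
}
#endif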
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/em.h>
106# include <VBox/vmm/hm_svm.h>
107#endif
108#include <VBox/vmm/tm.h>
109#include <VBox/vmm/dbgf.h>
110#include <VBox/vmm/dbgftrace.h>
111#ifdef VBOX_WITH_RAW_MODE_NOT_R0
112# include <VBox/vmm/patm.h>
113# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
114# include <VBox/vmm/csam.h>
115# endif
116#endif
117#include "IEMInternal.h"
118#ifdef IEM_VERIFICATION_MODE_FULL
119# include <VBox/vmm/rem.h>
120# include <VBox/vmm/mm.h>
121#endif
122#include <VBox/vmm/vm.h>
123#include <VBox/log.h>
124#include <VBox/err.h>
125#include <VBox/param.h>
126#include <VBox/dis.h>
127#include <VBox/disopcode.h>
128#include <iprt/assert.h>
129#include <iprt/string.h>
130#include <iprt/x86.h>
131
132
133/*********************************************************************************************************************************
134* Structures and Typedefs *
135*********************************************************************************************************************************/
136/** @typedef PFNIEMOP
137 * Pointer to an opcode decoder function.
138 */
139
140/** @def FNIEMOP_DEF
141 * Define an opcode decoder function.
142 *
143 * We're using macros for this so that adding and removing parameters as well as
144 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
145 *
146 * @param a_Name The function name.
147 */
148
149/** @typedef PFNIEMOPRM
150 * Pointer to an opcode decoder function with RM byte.
151 */
152
153/** @def FNIEMOPRM_DEF
154 * Define an opcode decoder function with RM byte.
155 *
156 * We're using macros for this so that adding and removing parameters as well as
157 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
158 *
159 * @param a_Name The function name.
160 */
161
162#if defined(__GNUC__) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
164typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
171
172#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
174typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
181
182#elif defined(__GNUC__)
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
191
192#else
193typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
194typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
195# define FNIEMOP_DEF(a_Name) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
197# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
198 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
199# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
200 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
201
202#endif
203#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
204
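/* Illustrative sketch added for exposition (not part of the original file):
 * a decoder function defined via the macros above.  The name iemOp_ExampleUd
 * is invented; the body simply raises #UD via the helper used elsewhere in
 * this file. */
#if 0
FNIEMOP_DEF(iemOp_ExampleUd)
{
    /* pVCpu is the implicit parameter every FNIEMOP_DEF function receives. */
    return iemRaiseUndefinedOpcode(pVCpu);
}
#endif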
205
206/**
207 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
208 */
209typedef union IEMSELDESC
210{
211 /** The legacy view. */
212 X86DESC Legacy;
213 /** The long mode view. */
214 X86DESC64 Long;
215} IEMSELDESC;
216/** Pointer to a selector descriptor table entry. */
217typedef IEMSELDESC *PIEMSELDESC;
218
219/**
220 * CPU exception classes.
221 */
222typedef enum IEMXCPTCLASS
223{
224 IEMXCPTCLASS_BENIGN,
225 IEMXCPTCLASS_CONTRIBUTORY,
226 IEMXCPTCLASS_PAGE_FAULT,
227 IEMXCPTCLASS_DOUBLE_FAULT
228} IEMXCPTCLASS;
229
230
231/*********************************************************************************************************************************
232* Defined Constants And Macros *
233*********************************************************************************************************************************/
234/** @def IEM_WITH_SETJMP
235 * Enables alternative status code handling using setjmps.
236 *
237 * This adds a bit of expense via the setjmp() call since it saves all the
238 * non-volatile registers. However, it eliminates return code checks and allows
239 * for more optimal return value passing (return regs instead of stack buffer).
240 */
241#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
242# define IEM_WITH_SETJMP
243#endif
244
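/* Illustrative sketch added for exposition (not part of the original file):
 * the generic setjmp/longjmp pattern that IEM_WITH_SETJMP enables - failure
 * statuses are thrown from deep within the decoder instead of being returned
 * and checked at every call level.  The helper names are hypothetical and do
 * not match IEM's actual jump-buffer plumbing. */
#if 0
# include <setjmp.h>
static void iemExampleSetjmpPattern(void)
{
    jmp_buf JmpBuf;
    int rc = setjmp(JmpBuf);            /* 0 on the initial call, the thrown status on unwind. */
    if (rc == 0)
        doWorkThatMayThrow(&JmpBuf);    /* hypothetical: callees do longjmp(JmpBuf, VERR_...) on failure */
    else
        handleThrownStatus(rc);         /* hypothetical: act on the status passed to longjmp */
}
#endif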
245/** Temporary hack to disable the double execution. Will be removed in favor
246 * of a dedicated execution mode in EM. */
247//#define IEM_VERIFICATION_MODE_NO_REM
248
249/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
250 * due to GCC lacking knowledge about the value range of a switch. */
251#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
252
253/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
254#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
255
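/* Illustrative sketch added for exposition (not part of the original file):
 * typical use of IEM_NOT_REACHED_DEFAULT_CASE_RET in a switch over a mode
 * enum; the function itself is invented. */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleSwitch(IEMMODE enmMode)
{
    switch (enmMode)
    {
        case IEMMODE_16BIT:
        case IEMMODE_32BIT:
        case IEMMODE_64BIT:
            return VINF_SUCCESS;
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE) */
    }
}
#endif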
256/**
257 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
258 * occasion.
259 */
260#ifdef LOG_ENABLED
261# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
262 do { \
263 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
264 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
265 } while (0)
266#else
267# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
269#endif
270
271/**
272 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
273 * occasion using the supplied logger statement.
274 *
275 * @param a_LoggerArgs What to log on failure.
276 */
277#ifdef LOG_ENABLED
278# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
279 do { \
280 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
281 /*LogFunc(a_LoggerArgs);*/ \
282 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
283 } while (0)
284#else
285# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
286 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
287#endif
288
289/**
290 * Call an opcode decoder function.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF.
294 */
295#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
296
297/**
298 * Call a common opcode decoder function taking one extra argument.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_1.
302 */
303#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
304
305/**
306 * Call a common opcode decoder function taking two extra arguments.
307 *
308 * We're using macros for this so that adding and removing parameters can be
309 * done as we please. See FNIEMOP_DEF_2.
310 */
311#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
312
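/* Illustrative sketch added for exposition (not part of the original file):
 * dispatching a fetched opcode byte through the one-byte opcode map (declared
 * further down) using the call macro above, roughly what the decoder loop
 * does.  'b' stands for the opcode byte just read. */
#if 0
VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
#endif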
313/**
314 * Check if we're currently executing in real or virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The IEM state of the current CPU.
318 */
319#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in virtual 8086 mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in long mode.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored when not in 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
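/* Example added for exposition: with uVex3rdReg = 0xd the macro above yields
 * 13 in 64-bit mode but 5 (0xd & 7) otherwise, i.e. the 4th bit is dropped. */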
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
389
390#ifdef VBOX_WITH_NESTED_HWVIRT
391/**
392 * Check the common SVM instruction preconditions.
393 */
394# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
395 do { \
396 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
397 { \
398 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
399 return iemRaiseUndefinedOpcode(pVCpu); \
400 } \
401 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
402 { \
403 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
404 return iemRaiseUndefinedOpcode(pVCpu); \
405 } \
406 if (pVCpu->iem.s.uCpl != 0) \
407 { \
408 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
409 return iemRaiseGeneralProtectionFault0(pVCpu); \
410 } \
411 } while (0)
412
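/* Illustrative sketch added for exposition (not part of the original file):
 * an SVM instruction implementation would typically run these common checks
 * before doing any real work; the function name is invented. */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleSvmInstr(PVMCPU pVCpu)
{
    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, example); /* EFER.SVME, real/v86-mode and CPL 0 checks. */
    /* ... instruction specific emulation would go here ... */
    return VINF_SUCCESS;
}
#endif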
413/**
414 * Check if SVM is enabled.
415 */
416# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
417
418/**
419 * Check if an SVM control/instruction intercept is set.
420 */
421# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
422
423/**
424 * Check if an SVM read CRx intercept is set.
425 */
426# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
427
428/**
429 * Check if an SVM write CRx intercept is set.
430 */
431# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
432
433/**
434 * Check if an SVM read DRx intercept is set.
435 */
436# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
437
438/**
439 * Check if an SVM write DRx intercept is set.
440 */
441# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
442
443/**
444 * Check if an SVM exception intercept is set.
445 */
446# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
447
448/**
449 * Invokes the SVM \#VMEXIT handler for the nested-guest.
450 */
451# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
452 do \
453 { \
454 return iemSvmVmexit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
455 } while (0)
456
457/**
458 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
459 * corresponding decode assist information.
460 */
461# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
462 do \
463 { \
464 uint64_t uExitInfo1; \
465 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
466 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
467 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
468 else \
469 uExitInfo1 = 0; \
470 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
471 } while (0)
472
473#else
474# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
475# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
476# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
477# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
478# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
479# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
480# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
481# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
482# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
483# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
484
485#endif /* VBOX_WITH_NESTED_HWVIRT */
486
487
488/*********************************************************************************************************************************
489* Global Variables *
490*********************************************************************************************************************************/
491extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
492
493
494/** Function table for the ADD instruction. */
495IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
496{
497 iemAImpl_add_u8, iemAImpl_add_u8_locked,
498 iemAImpl_add_u16, iemAImpl_add_u16_locked,
499 iemAImpl_add_u32, iemAImpl_add_u32_locked,
500 iemAImpl_add_u64, iemAImpl_add_u64_locked
501};
502
503/** Function table for the ADC instruction. */
504IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
505{
506 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
507 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
508 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
509 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
510};
511
512/** Function table for the SUB instruction. */
513IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
514{
515 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
516 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
517 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
518 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
519};
520
521/** Function table for the SBB instruction. */
522IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
523{
524 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
525 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
526 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
527 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
528};
529
530/** Function table for the OR instruction. */
531IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
532{
533 iemAImpl_or_u8, iemAImpl_or_u8_locked,
534 iemAImpl_or_u16, iemAImpl_or_u16_locked,
535 iemAImpl_or_u32, iemAImpl_or_u32_locked,
536 iemAImpl_or_u64, iemAImpl_or_u64_locked
537};
538
539/** Function table for the XOR instruction. */
540IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
541{
542 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
543 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
544 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
545 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
546};
547
548/** Function table for the AND instruction. */
549IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
550{
551 iemAImpl_and_u8, iemAImpl_and_u8_locked,
552 iemAImpl_and_u16, iemAImpl_and_u16_locked,
553 iemAImpl_and_u32, iemAImpl_and_u32_locked,
554 iemAImpl_and_u64, iemAImpl_and_u64_locked
555};
556
557/** Function table for the CMP instruction.
558 * @remarks Making operand order ASSUMPTIONS.
559 */
560IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
561{
562 iemAImpl_cmp_u8, NULL,
563 iemAImpl_cmp_u16, NULL,
564 iemAImpl_cmp_u32, NULL,
565 iemAImpl_cmp_u64, NULL
566};
567
568/** Function table for the TEST instruction.
569 * @remarks Making operand order ASSUMPTIONS.
570 */
571IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
572{
573 iemAImpl_test_u8, NULL,
574 iemAImpl_test_u16, NULL,
575 iemAImpl_test_u32, NULL,
576 iemAImpl_test_u64, NULL
577};
578
579/** Function table for the BT instruction. */
580IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
581{
582 NULL, NULL,
583 iemAImpl_bt_u16, NULL,
584 iemAImpl_bt_u32, NULL,
585 iemAImpl_bt_u64, NULL
586};
587
588/** Function table for the BTC instruction. */
589IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
590{
591 NULL, NULL,
592 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
593 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
594 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
595};
596
597/** Function table for the BTR instruction. */
598IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
599{
600 NULL, NULL,
601 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
602 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
603 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
604};
605
606/** Function table for the BTS instruction. */
607IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
608{
609 NULL, NULL,
610 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
611 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
612 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
613};
614
615/** Function table for the BSF instruction. */
616IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
617{
618 NULL, NULL,
619 iemAImpl_bsf_u16, NULL,
620 iemAImpl_bsf_u32, NULL,
621 iemAImpl_bsf_u64, NULL
622};
623
624/** Function table for the BSR instruction. */
625IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
626{
627 NULL, NULL,
628 iemAImpl_bsr_u16, NULL,
629 iemAImpl_bsr_u32, NULL,
630 iemAImpl_bsr_u64, NULL
631};
632
633/** Function table for the IMUL instruction. */
634IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
635{
636 NULL, NULL,
637 iemAImpl_imul_two_u16, NULL,
638 iemAImpl_imul_two_u32, NULL,
639 iemAImpl_imul_two_u64, NULL
640};
641
642/** Group 1 /r lookup table. */
643IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
644{
645 &g_iemAImpl_add,
646 &g_iemAImpl_or,
647 &g_iemAImpl_adc,
648 &g_iemAImpl_sbb,
649 &g_iemAImpl_and,
650 &g_iemAImpl_sub,
651 &g_iemAImpl_xor,
652 &g_iemAImpl_cmp
653};
654
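/* Illustrative sketch added for exposition (not part of the original file):
 * the reg field of the ModR/M byte (bits 3..5) indexes this table, so /0 is
 * ADD and /7 is CMP for the group 1 opcodes (0x80..0x83).  'bRm' stands for
 * the ModR/M byte the decoder just fetched. */
#if 0
PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
#endif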
655/** Function table for the INC instruction. */
656IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
657{
658 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
659 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
660 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
661 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
662};
663
664/** Function table for the DEC instruction. */
665IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
666{
667 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
668 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
669 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
670 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
671};
672
673/** Function table for the NEG instruction. */
674IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
675{
676 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
677 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
678 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
679 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
680};
681
682/** Function table for the NOT instruction. */
683IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
684{
685 iemAImpl_not_u8, iemAImpl_not_u8_locked,
686 iemAImpl_not_u16, iemAImpl_not_u16_locked,
687 iemAImpl_not_u32, iemAImpl_not_u32_locked,
688 iemAImpl_not_u64, iemAImpl_not_u64_locked
689};
690
691
692/** Function table for the ROL instruction. */
693IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
694{
695 iemAImpl_rol_u8,
696 iemAImpl_rol_u16,
697 iemAImpl_rol_u32,
698 iemAImpl_rol_u64
699};
700
701/** Function table for the ROR instruction. */
702IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
703{
704 iemAImpl_ror_u8,
705 iemAImpl_ror_u16,
706 iemAImpl_ror_u32,
707 iemAImpl_ror_u64
708};
709
710/** Function table for the RCL instruction. */
711IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
712{
713 iemAImpl_rcl_u8,
714 iemAImpl_rcl_u16,
715 iemAImpl_rcl_u32,
716 iemAImpl_rcl_u64
717};
718
719/** Function table for the RCR instruction. */
720IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
721{
722 iemAImpl_rcr_u8,
723 iemAImpl_rcr_u16,
724 iemAImpl_rcr_u32,
725 iemAImpl_rcr_u64
726};
727
728/** Function table for the SHL instruction. */
729IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
730{
731 iemAImpl_shl_u8,
732 iemAImpl_shl_u16,
733 iemAImpl_shl_u32,
734 iemAImpl_shl_u64
735};
736
737/** Function table for the SHR instruction. */
738IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
739{
740 iemAImpl_shr_u8,
741 iemAImpl_shr_u16,
742 iemAImpl_shr_u32,
743 iemAImpl_shr_u64
744};
745
746/** Function table for the SAR instruction. */
747IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
748{
749 iemAImpl_sar_u8,
750 iemAImpl_sar_u16,
751 iemAImpl_sar_u32,
752 iemAImpl_sar_u64
753};
754
755
756/** Function table for the MUL instruction. */
757IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
758{
759 iemAImpl_mul_u8,
760 iemAImpl_mul_u16,
761 iemAImpl_mul_u32,
762 iemAImpl_mul_u64
763};
764
765/** Function table for the IMUL instruction working implicitly on rAX. */
766IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
767{
768 iemAImpl_imul_u8,
769 iemAImpl_imul_u16,
770 iemAImpl_imul_u32,
771 iemAImpl_imul_u64
772};
773
774/** Function table for the DIV instruction. */
775IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
776{
777 iemAImpl_div_u8,
778 iemAImpl_div_u16,
779 iemAImpl_div_u32,
780 iemAImpl_div_u64
781};
782
783/** Function table for the IDIV instruction. */
784IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
785{
786 iemAImpl_idiv_u8,
787 iemAImpl_idiv_u16,
788 iemAImpl_idiv_u32,
789 iemAImpl_idiv_u64
790};
791
792/** Function table for the SHLD instruction */
793IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
794{
795 iemAImpl_shld_u16,
796 iemAImpl_shld_u32,
797 iemAImpl_shld_u64,
798};
799
800/** Function table for the SHRD instruction */
801IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
802{
803 iemAImpl_shrd_u16,
804 iemAImpl_shrd_u32,
805 iemAImpl_shrd_u64,
806};
807
808
809/** Function table for the PUNPCKLBW instruction */
810IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
811/** Function table for the PUNPCKLWD instruction */
812IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
813/** Function table for the PUNPCKLDQ instruction */
814IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
815/** Function table for the PUNPCKLQDQ instruction */
816IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
817
818/** Function table for the PUNPCKHBW instruction */
819IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
820/** Function table for the PUNPCKHWD instruction */
821IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
822/** Function table for the PUNPCKHDQ instruction */
823IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
824/** Function table for the PUNPCKHQDQ instruction */
825IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
826
827/** Function table for the PXOR instruction */
828IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
829/** Function table for the PCMPEQB instruction */
830IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
831/** Function table for the PCMPEQW instruction */
832IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
833/** Function table for the PCMPEQD instruction */
834IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
835
836
837#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
838/** What IEM just wrote. */
839uint8_t g_abIemWrote[256];
840/** How much IEM just wrote. */
841size_t g_cbIemWrote;
842#endif
843
844
845/*********************************************************************************************************************************
846* Internal Functions *
847*********************************************************************************************************************************/
848IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
849IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
850IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
851IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
852/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
853IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
854IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
855IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
856IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
857IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
858IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
859IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
860IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
861IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
862IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
863IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
864IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
865#ifdef IEM_WITH_SETJMP
866DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
867DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
868DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
869DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
870DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
871#endif
872
873IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
874IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
875IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
876IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
877IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
878IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
879IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
880IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
881IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
882IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
883IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
884IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
885IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
886IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
887IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
888IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
889IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
890
891#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
892IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
893#endif
894IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
895IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
896
897#ifdef VBOX_WITH_NESTED_HWVIRT
898IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
899 uint64_t uExitInfo2);
900IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
901 uint32_t uErr, uint64_t uCr2);
902#endif
903
904/**
905 * Sets the pass up status.
906 *
907 * @returns VINF_SUCCESS.
908 * @param pVCpu The cross context virtual CPU structure of the
909 * calling thread.
910 * @param rcPassUp The pass up status. Must be informational.
911 * VINF_SUCCESS is not allowed.
912 */
913IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
914{
915 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
916
917 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
918 if (rcOldPassUp == VINF_SUCCESS)
919 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
920 /* If both are EM scheduling codes, use EM priority rules. */
921 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
922 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
923 {
924 if (rcPassUp < rcOldPassUp)
925 {
926 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
927 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
928 }
929 else
930 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
931 }
932 /* Override EM scheduling with specific status code. */
933 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
934 {
935 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
936 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
937 }
938 /* Don't override specific status code, first come first served. */
939 else
940 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
941 return VINF_SUCCESS;
942}
943
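/* Illustrative sketch added for exposition (not part of the original file):
 * the typical caller pattern - a strict informational status from a physical
 * access is recorded as the pass-up status and execution continues as if the
 * access succeeded (see the opcode prefetch code further down). */
#if 0
if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
#endif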
944
945/**
946 * Calculates the CPU mode.
947 *
948 * This is mainly for updating IEMCPU::enmCpuMode.
949 *
950 * @returns CPU mode.
951 * @param pCtx The register context for the CPU.
952 */
953DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
954{
955 if (CPUMIsGuestIn64BitCodeEx(pCtx))
956 return IEMMODE_64BIT;
957 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
958 return IEMMODE_32BIT;
959 return IEMMODE_16BIT;
960}
961
962
963/**
964 * Initializes the execution state.
965 *
966 * @param pVCpu The cross context virtual CPU structure of the
967 * calling thread.
968 * @param fBypassHandlers Whether to bypass access handlers.
969 *
970 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
971 * side-effects in strict builds.
972 */
973DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
974{
975 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
976
977 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
978
979#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
980 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
981 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
982 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
983 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
984 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
985 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
986 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
987 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
988#endif
989
990#ifdef VBOX_WITH_RAW_MODE_NOT_R0
991 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
992#endif
993 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
994 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
995#ifdef VBOX_STRICT
996 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
997 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
998 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
999 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1000 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1001 pVCpu->iem.s.uRexReg = 127;
1002 pVCpu->iem.s.uRexB = 127;
1003 pVCpu->iem.s.uRexIndex = 127;
1004 pVCpu->iem.s.iEffSeg = 127;
1005 pVCpu->iem.s.idxPrefix = 127;
1006 pVCpu->iem.s.uVex3rdReg = 127;
1007 pVCpu->iem.s.uVexLength = 127;
1008 pVCpu->iem.s.fEvexStuff = 127;
1009 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1010# ifdef IEM_WITH_CODE_TLB
1011 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1012 pVCpu->iem.s.pbInstrBuf = NULL;
1013 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1014 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1015 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1016 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1017# else
1018 pVCpu->iem.s.offOpcode = 127;
1019 pVCpu->iem.s.cbOpcode = 127;
1020# endif
1021#endif
1022
1023 pVCpu->iem.s.cActiveMappings = 0;
1024 pVCpu->iem.s.iNextMapping = 0;
1025 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1026 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1027#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1028 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1029 && pCtx->cs.u64Base == 0
1030 && pCtx->cs.u32Limit == UINT32_MAX
1031 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1032 if (!pVCpu->iem.s.fInPatchCode)
1033 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1034#endif
1035
1036#ifdef IEM_VERIFICATION_MODE_FULL
1037 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1038 pVCpu->iem.s.fNoRem = true;
1039#endif
1040}
1041
1042#ifdef VBOX_WITH_NESTED_HWVIRT
1043/**
1044 * Performs a minimal reinitialization of the execution state.
1045 *
1046 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1047 * 'world-switch' type operations on the CPU. Currently only nested
1048 * hardware-virtualization uses it.
1049 *
1050 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1051 */
1052IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1053{
1054 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1055 IEMMODE const enmMode = iemCalcCpuMode(pCtx);
1056 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1057
1058 pVCpu->iem.s.uCpl = uCpl;
1059 pVCpu->iem.s.enmCpuMode = enmMode;
1060 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1061 pVCpu->iem.s.enmEffAddrMode = enmMode;
1062 if (enmMode != IEMMODE_64BIT)
1063 {
1064 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1065 pVCpu->iem.s.enmEffOpSize = enmMode;
1066 }
1067 else
1068 {
1069 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1070 pVCpu->iem.s.enmEffOpSize = enmMode;
1071 }
1072 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1073#ifndef IEM_WITH_CODE_TLB
1074 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1075 pVCpu->iem.s.offOpcode = 0;
1076 pVCpu->iem.s.cbOpcode = 0;
1077#endif
1078}
1079#endif
1080
1081/**
1082 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1083 *
1084 * @param pVCpu The cross context virtual CPU structure of the
1085 * calling thread.
1086 */
1087DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1088{
1089 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1090#ifdef IEM_VERIFICATION_MODE_FULL
1091 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1092#endif
1093#ifdef VBOX_STRICT
1094# ifdef IEM_WITH_CODE_TLB
1095 NOREF(pVCpu);
1096# else
1097 pVCpu->iem.s.cbOpcode = 0;
1098# endif
1099#else
1100 NOREF(pVCpu);
1101#endif
1102}
1103
1104
1105/**
1106 * Initializes the decoder state.
1107 *
1108 * iemReInitDecoder is mostly a copy of this function.
1109 *
1110 * @param pVCpu The cross context virtual CPU structure of the
1111 * calling thread.
1112 * @param fBypassHandlers Whether to bypass access handlers.
1113 */
1114DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1115{
1116 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1117
1118 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1119
1120#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1122 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1123 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1124 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1125 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1126 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1127 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1128 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1129#endif
1130
1131#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1132 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1133#endif
1134 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1135#ifdef IEM_VERIFICATION_MODE_FULL
1136 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1137 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1138#endif
1139 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1140 pVCpu->iem.s.enmCpuMode = enmMode;
1141 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1142 pVCpu->iem.s.enmEffAddrMode = enmMode;
1143 if (enmMode != IEMMODE_64BIT)
1144 {
1145 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1146 pVCpu->iem.s.enmEffOpSize = enmMode;
1147 }
1148 else
1149 {
1150 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1151 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1152 }
1153 pVCpu->iem.s.fPrefixes = 0;
1154 pVCpu->iem.s.uRexReg = 0;
1155 pVCpu->iem.s.uRexB = 0;
1156 pVCpu->iem.s.uRexIndex = 0;
1157 pVCpu->iem.s.idxPrefix = 0;
1158 pVCpu->iem.s.uVex3rdReg = 0;
1159 pVCpu->iem.s.uVexLength = 0;
1160 pVCpu->iem.s.fEvexStuff = 0;
1161 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1162#ifdef IEM_WITH_CODE_TLB
1163 pVCpu->iem.s.pbInstrBuf = NULL;
1164 pVCpu->iem.s.offInstrNextByte = 0;
1165 pVCpu->iem.s.offCurInstrStart = 0;
1166# ifdef VBOX_STRICT
1167 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1168 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1169 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1170# endif
1171#else
1172 pVCpu->iem.s.offOpcode = 0;
1173 pVCpu->iem.s.cbOpcode = 0;
1174#endif
1175 pVCpu->iem.s.cActiveMappings = 0;
1176 pVCpu->iem.s.iNextMapping = 0;
1177 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1178 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1179#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1180 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1181 && pCtx->cs.u64Base == 0
1182 && pCtx->cs.u32Limit == UINT32_MAX
1183 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1184 if (!pVCpu->iem.s.fInPatchCode)
1185 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1186#endif
1187
1188#ifdef DBGFTRACE_ENABLED
1189 switch (enmMode)
1190 {
1191 case IEMMODE_64BIT:
1192 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1193 break;
1194 case IEMMODE_32BIT:
1195 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1196 break;
1197 case IEMMODE_16BIT:
1198 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1199 break;
1200 }
1201#endif
1202}
1203
1204
1205/**
1206 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1207 *
1208 * This is mostly a copy of iemInitDecoder.
1209 *
1210 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1211 */
1212DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1213{
1214 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1215
1216 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1217
1218#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1219 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1220 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1221 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1222 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1223 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1224 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1225 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1226 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1227#endif
1228
1229 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1230#ifdef IEM_VERIFICATION_MODE_FULL
1231 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1232 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1233#endif
1234 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1235 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1236 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1237 pVCpu->iem.s.enmEffAddrMode = enmMode;
1238 if (enmMode != IEMMODE_64BIT)
1239 {
1240 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1241 pVCpu->iem.s.enmEffOpSize = enmMode;
1242 }
1243 else
1244 {
1245 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1246 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1247 }
1248 pVCpu->iem.s.fPrefixes = 0;
1249 pVCpu->iem.s.uRexReg = 0;
1250 pVCpu->iem.s.uRexB = 0;
1251 pVCpu->iem.s.uRexIndex = 0;
1252 pVCpu->iem.s.idxPrefix = 0;
1253 pVCpu->iem.s.uVex3rdReg = 0;
1254 pVCpu->iem.s.uVexLength = 0;
1255 pVCpu->iem.s.fEvexStuff = 0;
1256 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1257#ifdef IEM_WITH_CODE_TLB
1258 if (pVCpu->iem.s.pbInstrBuf)
1259 {
1260 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1261 - pVCpu->iem.s.uInstrBufPc;
1262 if (off < pVCpu->iem.s.cbInstrBufTotal)
1263 {
1264 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1265 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1266 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1267 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1268 else
1269 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1270 }
1271 else
1272 {
1273 pVCpu->iem.s.pbInstrBuf = NULL;
1274 pVCpu->iem.s.offInstrNextByte = 0;
1275 pVCpu->iem.s.offCurInstrStart = 0;
1276 pVCpu->iem.s.cbInstrBuf = 0;
1277 pVCpu->iem.s.cbInstrBufTotal = 0;
1278 }
1279 }
1280 else
1281 {
1282 pVCpu->iem.s.offInstrNextByte = 0;
1283 pVCpu->iem.s.offCurInstrStart = 0;
1284 pVCpu->iem.s.cbInstrBuf = 0;
1285 pVCpu->iem.s.cbInstrBufTotal = 0;
1286 }
1287#else
1288 pVCpu->iem.s.cbOpcode = 0;
1289 pVCpu->iem.s.offOpcode = 0;
1290#endif
1291 Assert(pVCpu->iem.s.cActiveMappings == 0);
1292 pVCpu->iem.s.iNextMapping = 0;
1293 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1294 Assert(pVCpu->iem.s.fBypassHandlers == false);
1295#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1296 if (!pVCpu->iem.s.fInPatchCode)
1297 { /* likely */ }
1298 else
1299 {
1300 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1301 && pCtx->cs.u64Base == 0
1302 && pCtx->cs.u32Limit == UINT32_MAX
1303 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1304 if (!pVCpu->iem.s.fInPatchCode)
1305 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1306 }
1307#endif
1308
1309#ifdef DBGFTRACE_ENABLED
1310 switch (enmMode)
1311 {
1312 case IEMMODE_64BIT:
1313 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1314 break;
1315 case IEMMODE_32BIT:
1316 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1317 break;
1318 case IEMMODE_16BIT:
1319 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1320 break;
1321 }
1322#endif
1323}
1324
1325
1326
1327/**
1328 * Prefetch opcodes the first time when starting executing.
1329 *
1330 * @returns Strict VBox status code.
1331 * @param pVCpu The cross context virtual CPU structure of the
1332 * calling thread.
1333 * @param fBypassHandlers Whether to bypass access handlers.
1334 */
1335IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1336{
1337#ifdef IEM_VERIFICATION_MODE_FULL
1338 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1339#endif
1340 iemInitDecoder(pVCpu, fBypassHandlers);
1341
1342#ifdef IEM_WITH_CODE_TLB
1343 /** @todo Do ITLB lookup here. */
1344
1345#else /* !IEM_WITH_CODE_TLB */
1346
1347 /*
1348 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1349 *
1350 * First translate CS:rIP to a physical address.
1351 */
1352 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1353 uint32_t cbToTryRead;
1354 RTGCPTR GCPtrPC;
1355 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1356 {
1357 cbToTryRead = PAGE_SIZE;
1358 GCPtrPC = pCtx->rip;
1359 if (IEM_IS_CANONICAL(GCPtrPC))
1360 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1361 else
1362 return iemRaiseGeneralProtectionFault0(pVCpu);
1363 }
1364 else
1365 {
1366 uint32_t GCPtrPC32 = pCtx->eip;
1367 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1368 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1369 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1370 else
1371 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1372 if (cbToTryRead) { /* likely */ }
1373 else /* overflowed */
1374 {
1375 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1376 cbToTryRead = UINT32_MAX;
1377 }
1378 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1379 Assert(GCPtrPC <= UINT32_MAX);
1380 }
1381
1382# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1383 /* Allow interpretation of patch manager code blocks since they can for
1384 instance throw #PFs for perfectly good reasons. */
1385 if (pVCpu->iem.s.fInPatchCode)
1386 {
1387 size_t cbRead = 0;
1388 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1389 AssertRCReturn(rc, rc);
1390 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1391 return VINF_SUCCESS;
1392 }
1393# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1394
1395 RTGCPHYS GCPhys;
1396 uint64_t fFlags;
1397 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1398 if (RT_SUCCESS(rc)) { /* probable */ }
1399 else
1400 {
1401 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1402 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1403 }
1404 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1405 else
1406 {
1407 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1408 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1409 }
1410 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1411 else
1412 {
1413 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1414 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1415 }
1416 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1417 /** @todo Check reserved bits and such stuff. PGM is better at doing
1418 * that, so do it when implementing the guest virtual address
1419 * TLB... */
1420
1421# ifdef IEM_VERIFICATION_MODE_FULL
1422 /*
1423 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1424 * instruction.
1425 */
1426 /** @todo optimize this differently by not using PGMPhysRead. */
1427 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1428 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1429 if ( offPrevOpcodes < cbOldOpcodes
1430 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1431 {
1432 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1433 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1434 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1435 pVCpu->iem.s.cbOpcode = cbNew;
1436 return VINF_SUCCESS;
1437 }
1438# endif
1439
1440 /*
1441 * Read the bytes at this address.
1442 */
1443 PVM pVM = pVCpu->CTX_SUFF(pVM);
1444# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1445 size_t cbActual;
1446 if ( PATMIsEnabled(pVM)
1447 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1448 {
1449 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1450 Assert(cbActual > 0);
1451 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1452 }
1453 else
1454# endif
1455 {
1456 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1457 if (cbToTryRead > cbLeftOnPage)
1458 cbToTryRead = cbLeftOnPage;
1459 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1460 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1461
1462 if (!pVCpu->iem.s.fBypassHandlers)
1463 {
1464 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1465 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1466 { /* likely */ }
1467 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1468 {
1469 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1470 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1471 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1472 }
1473 else
1474 {
1475 Log((RT_SUCCESS(rcStrict)
1476 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1477 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1478 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1479 return rcStrict;
1480 }
1481 }
1482 else
1483 {
1484 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1485 if (RT_SUCCESS(rc))
1486 { /* likely */ }
1487 else
1488 {
1489 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1490                     GCPtrPC, GCPhys, cbToTryRead, rc));
1491 return rc;
1492 }
1493 }
1494 pVCpu->iem.s.cbOpcode = cbToTryRead;
1495 }
1496#endif /* !IEM_WITH_CODE_TLB */
1497 return VINF_SUCCESS;
1498}
1499
1500
1501/**
1502 * Invalidates the IEM TLBs.
1503 *
1504 * This is called internally as well as by PGM when moving GC mappings.
1505 *
1507 * @param pVCpu The cross context virtual CPU structure of the calling
1508 * thread.
1509 * @param fVmm Set when PGM calls us with a remapping.
1510 */
1511VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1512{
1513#ifdef IEM_WITH_CODE_TLB
1514 pVCpu->iem.s.cbInstrBufTotal = 0;
1515 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1516 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1517 { /* very likely */ }
1518 else
1519 {
1520 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1521 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1522 while (i-- > 0)
1523 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1524 }
1525#endif
1526
1527#ifdef IEM_WITH_DATA_TLB
1528 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1529 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1530 { /* very likely */ }
1531 else
1532 {
1533 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1534 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1535 while (i-- > 0)
1536 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1537 }
1538#endif
1539 NOREF(pVCpu); NOREF(fVmm);
1540}
1541
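/* The revision trick above keeps invalidation cheap: every stored tag mixes the
 * page number with the current uTlbRevision, so bumping the revision makes all
 * existing entries miss without touching them; only on wrap-around do the tags
 * need scrubbing.  A rough sketch of the lookup side (hypothetical helper,
 * never compiled):
 */
#if 0
DECLINLINE(bool) iemTlbSketchStillHits(uint64_t uTagStored, RTGCPTR GCPtr, uint64_t uTlbRevision)
{
    /* Same tag construction as the code TLB lookup in iemOpcodeFetchBytesJmp below. */
    return uTagStored == (((uint64_t)GCPtr >> X86_PAGE_SHIFT) | uTlbRevision);
}
#endif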
1542
1543/**
1544 * Invalidates a page in the TLBs.
1545 *
1546 * @param pVCpu The cross context virtual CPU structure of the calling
1547 * thread.
1548 * @param       GCPtr       The address of the page to invalidate.
1549 */
1550VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1551{
1552#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1553 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1554 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1555 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1556 uintptr_t idx = (uint8_t)GCPtr;
1557
1558# ifdef IEM_WITH_CODE_TLB
1559 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1560 {
1561 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1562 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1563 pVCpu->iem.s.cbInstrBufTotal = 0;
1564 }
1565# endif
1566
1567# ifdef IEM_WITH_DATA_TLB
1568 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1569 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1570# endif
1571#else
1572 NOREF(pVCpu); NOREF(GCPtr);
1573#endif
1574}
1575
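/* For reference, the direct-mapped slot selection used above: the low 8 bits of
 * the page number pick one of the 256 entries, e.g. GCPtr 0x00401123 has page
 * number 0x401 and therefore lands in slot 0x01.  Sketch only (hypothetical
 * helper, never compiled):
 */
#if 0
DECLINLINE(uintptr_t) iemTlbSketchSlotIndex(RTGCPTR GCPtr)
{
    return (uint8_t)(GCPtr >> X86_PAGE_SHIFT); /* same as the idx calculation in IEMTlbInvalidatePage */
}
#endif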
1576
1577/**
1578 * Invalidates the host physical aspects of the IEM TLBs.
1579 *
1580 * This is called internally as well as by PGM when moving GC mappings.
1581 *
1582 * @param pVCpu The cross context virtual CPU structure of the calling
1583 * thread.
1584 */
1585VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1586{
1587#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1588    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1589
1590# ifdef IEM_WITH_CODE_TLB
1591 pVCpu->iem.s.cbInstrBufTotal = 0;
1592# endif
1593 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1594 if (uTlbPhysRev != 0)
1595 {
1596 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1597 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1598 }
1599 else
1600 {
1601 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1602 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1603
1604 unsigned i;
1605# ifdef IEM_WITH_CODE_TLB
1606 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1607 while (i-- > 0)
1608 {
1609 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1610 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1611 }
1612# endif
1613# ifdef IEM_WITH_DATA_TLB
1614 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1615 while (i-- > 0)
1616 {
1617 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1618 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1619 }
1620# endif
1621 }
1622#else
1623 NOREF(pVCpu);
1624#endif
1625}
1626
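/* The physical side uses the same revisioning idea: each entry carries the
 * physical revision in the IEMTLBE_F_PHYS_REV bits of fFlagsAndPhysRev, and the
 * cached mapping/read/write info only counts when those bits match the current
 * uTlbPhysRev (see the lookup in iemOpcodeFetchBytesJmp below).  Sketch only
 * (hypothetical helper, never compiled):
 */
#if 0
DECLINLINE(bool) iemTlbSketchPhysInfoValid(PIEMTLBENTRY pTlbe, uint64_t uTlbPhysRev)
{
    return (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == uTlbPhysRev;
}
#endif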
1627
1628/**
1629 * Invalidates the host physical aspects of the IEM TLBs for all CPUs.
1630 *
1631 * This is called internally as well as by PGM when moving GC mappings.
1632 *
1633 * @param pVM The cross context VM structure.
1634 *
1635 * @remarks Caller holds the PGM lock.
1636 */
1637VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1638{
1639 RT_NOREF_PV(pVM);
1640}
1641
1642#ifdef IEM_WITH_CODE_TLB
1643
1644/**
1645 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1646 * longjmp'ing on failure.
1647 *
1648 * We end up here for a number of reasons:
1649 * - pbInstrBuf isn't yet initialized.
1650 *      - Advancing beyond the buffer boundary (e.g. cross page).
1651 * - Advancing beyond the CS segment limit.
1652 * - Fetching from non-mappable page (e.g. MMIO).
1653 *
1654 * @param pVCpu The cross context virtual CPU structure of the
1655 * calling thread.
1656 * @param pvDst Where to return the bytes.
1657 * @param cbDst Number of bytes to read.
1658 *
1659 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1660 */
1661IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1662{
1663#ifdef IN_RING3
1664//__debugbreak();
1665 for (;;)
1666 {
1667 Assert(cbDst <= 8);
1668 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1669
1670 /*
1671 * We might have a partial buffer match, deal with that first to make the
1672 * rest simpler. This is the first part of the cross page/buffer case.
1673 */
1674 if (pVCpu->iem.s.pbInstrBuf != NULL)
1675 {
1676 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1677 {
1678 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1679 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1680 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1681
1682 cbDst -= cbCopy;
1683 pvDst = (uint8_t *)pvDst + cbCopy;
1684 offBuf += cbCopy;
1685                pVCpu->iem.s.offInstrNextByte = offBuf;
1686 }
1687 }
1688
1689 /*
1690 * Check segment limit, figuring how much we're allowed to access at this point.
1691 *
1692 * We will fault immediately if RIP is past the segment limit / in non-canonical
1693 * territory. If we do continue, there are one or more bytes to read before we
1694 * end up in trouble and we need to do that first before faulting.
1695 */
1696 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1697 RTGCPTR GCPtrFirst;
1698 uint32_t cbMaxRead;
1699 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1700 {
1701 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1702 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1703 { /* likely */ }
1704 else
1705 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1706 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1707 }
1708 else
1709 {
1710 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1711 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1712 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1713 { /* likely */ }
1714 else
1715 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1716 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1717 if (cbMaxRead != 0)
1718 { /* likely */ }
1719 else
1720 {
1721 /* Overflowed because address is 0 and limit is max. */
1722 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1723 cbMaxRead = X86_PAGE_SIZE;
1724 }
1725 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1726 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1727 if (cbMaxRead2 < cbMaxRead)
1728 cbMaxRead = cbMaxRead2;
1729 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1730 }
1731
1732 /*
1733 * Get the TLB entry for this piece of code.
1734 */
1735 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1736 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1737 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1738 if (pTlbe->uTag == uTag)
1739 {
1740 /* likely when executing lots of code, otherwise unlikely */
1741# ifdef VBOX_WITH_STATISTICS
1742 pVCpu->iem.s.CodeTlb.cTlbHits++;
1743# endif
1744 }
1745 else
1746 {
1747 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1748# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1749 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1750 {
1751 pTlbe->uTag = uTag;
1752 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1753                                      | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1754 pTlbe->GCPhys = NIL_RTGCPHYS;
1755 pTlbe->pbMappingR3 = NULL;
1756 }
1757 else
1758# endif
1759 {
1760 RTGCPHYS GCPhys;
1761 uint64_t fFlags;
1762 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1763 if (RT_FAILURE(rc))
1764 {
1765                    Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1766 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1767 }
1768
1769 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1770 pTlbe->uTag = uTag;
1771 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1772 pTlbe->GCPhys = GCPhys;
1773 pTlbe->pbMappingR3 = NULL;
1774 }
1775 }
1776
1777 /*
1778 * Check TLB page table level access flags.
1779 */
1780 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1781 {
1782 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1783 {
1784 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1785 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1786 }
1787 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1788 {
1789                Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1790 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1791 }
1792 }
1793
1794# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1795 /*
1796 * Allow interpretation of patch manager code blocks since they can for
1797 * instance throw #PFs for perfectly good reasons.
1798 */
1799 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1800        { /* not unlikely */ }
1801 else
1802 {
1803            /** @todo Could optimize this a little in ring-3 if we liked. */
1804 size_t cbRead = 0;
1805 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1806 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1807 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1808 return;
1809 }
1810# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1811
1812 /*
1813 * Look up the physical page info if necessary.
1814 */
1815 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1816 { /* not necessary */ }
1817 else
1818 {
1819 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1820 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1821 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1822 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1823 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1824 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1825 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1826 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1827 }
1828
1829# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1830 /*
1831 * Try do a direct read using the pbMappingR3 pointer.
1832 */
1833 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1834 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1835 {
1836 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1837 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1838 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1839 {
1840 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1841 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1842 }
1843 else
1844 {
1845 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1846 Assert(cbInstr < cbMaxRead);
1847 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1848 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1849 }
1850 if (cbDst <= cbMaxRead)
1851 {
1852 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1853 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1854 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1855 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1856 return;
1857 }
1858 pVCpu->iem.s.pbInstrBuf = NULL;
1859
1860 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1861 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1862 }
1863 else
1864# endif
1865#if 0
1866 /*
1867         * If there is no special read handling, we can read a bit more and
1868 * put it in the prefetch buffer.
1869 */
1870 if ( cbDst < cbMaxRead
1871 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1872 {
1873 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1874 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1875 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1876 { /* likely */ }
1877 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1878 {
1879 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1880 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1881 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1882                AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1883 }
1884 else
1885 {
1886 Log((RT_SUCCESS(rcStrict)
1887 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1888 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1889 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1890 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1891 }
1892 }
1893 /*
1894 * Special read handling, so only read exactly what's needed.
1895 * This is a highly unlikely scenario.
1896 */
1897 else
1898#endif
1899 {
1900 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1901 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1902 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1903 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1904 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1905 { /* likely */ }
1906 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1907 {
1908                Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status -  rcStrict=%Rrc\n",
1909                     GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1910 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1911 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1912 }
1913 else
1914 {
1915 Log((RT_SUCCESS(rcStrict)
1916                     ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1917                     : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1918                     GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1919 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1920 }
1921 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1922 if (cbToRead == cbDst)
1923 return;
1924 }
1925
1926 /*
1927 * More to read, loop.
1928 */
1929 cbDst -= cbMaxRead;
1930 pvDst = (uint8_t *)pvDst + cbMaxRead;
1931 }
1932#else
1933 RT_NOREF(pvDst, cbDst);
1934 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1935#endif
1936}
1937
1938#else
1939
1940/**
1941 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1942 * exception if it fails.
1943 *
1944 * @returns Strict VBox status code.
1945 * @param pVCpu The cross context virtual CPU structure of the
1946 * calling thread.
1947 * @param   cbMin               The minimum number of bytes relative to offOpcode
1948 * that must be read.
1949 */
1950IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1951{
1952 /*
1953 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1954 *
1955 * First translate CS:rIP to a physical address.
1956 */
1957 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1958 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1959 uint32_t cbToTryRead;
1960 RTGCPTR GCPtrNext;
1961 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1962 {
1963 cbToTryRead = PAGE_SIZE;
1964 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1965 if (!IEM_IS_CANONICAL(GCPtrNext))
1966 return iemRaiseGeneralProtectionFault0(pVCpu);
1967 }
1968 else
1969 {
1970 uint32_t GCPtrNext32 = pCtx->eip;
1971 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1972 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1973 if (GCPtrNext32 > pCtx->cs.u32Limit)
1974 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1975 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1976 if (!cbToTryRead) /* overflowed */
1977 {
1978 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1979 cbToTryRead = UINT32_MAX;
1980 /** @todo check out wrapping around the code segment. */
1981 }
1982 if (cbToTryRead < cbMin - cbLeft)
1983 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1984 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1985 }
1986
1987 /* Only read up to the end of the page, and make sure we don't read more
1988 than the opcode buffer can hold. */
1989 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1990 if (cbToTryRead > cbLeftOnPage)
1991 cbToTryRead = cbLeftOnPage;
1992 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1993 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1994/** @todo r=bird: Convert assertion into undefined opcode exception? */
1995 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1996
1997# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1998 /* Allow interpretation of patch manager code blocks since they can for
1999 instance throw #PFs for perfectly good reasons. */
2000 if (pVCpu->iem.s.fInPatchCode)
2001 {
2002 size_t cbRead = 0;
2003 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2004 AssertRCReturn(rc, rc);
2005 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2006 return VINF_SUCCESS;
2007 }
2008# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2009
2010 RTGCPHYS GCPhys;
2011 uint64_t fFlags;
2012 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2013 if (RT_FAILURE(rc))
2014 {
2015 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2016 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2017 }
2018 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2019 {
2020 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2021 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2022 }
2023 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2024 {
2025 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2026 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2027 }
2028 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2029 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2030 /** @todo Check reserved bits and such stuff. PGM is better at doing
2031 * that, so do it when implementing the guest virtual address
2032 * TLB... */
2033
2034 /*
2035 * Read the bytes at this address.
2036 *
2037 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2038 * and since PATM should only patch the start of an instruction there
2039 * should be no need to check again here.
2040 */
2041 if (!pVCpu->iem.s.fBypassHandlers)
2042 {
2043 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2044 cbToTryRead, PGMACCESSORIGIN_IEM);
2045 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2046 { /* likely */ }
2047 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2048 {
2049 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2050                 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
2050                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2051 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2052 }
2053 else
2054 {
2055 Log((RT_SUCCESS(rcStrict)
2056 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2057 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2058                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2059 return rcStrict;
2060 }
2061 }
2062 else
2063 {
2064 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2065 if (RT_SUCCESS(rc))
2066 { /* likely */ }
2067 else
2068 {
2069 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2070 return rc;
2071 }
2072 }
2073 pVCpu->iem.s.cbOpcode += cbToTryRead;
2074 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2075
2076 return VINF_SUCCESS;
2077}
2078
2079#endif /* !IEM_WITH_CODE_TLB */
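
/* In the non-TLB configuration the decoder works out of the small abOpcode[]
 * buffer: cbOpcode says how much of it is valid and offOpcode how much has been
 * consumed, and the inline getters below only drop into the slow fetch path
 * when the request would run past cbOpcode.  Sketch of that test (hypothetical
 * helper, never compiled):
 */
#if 0
DECLINLINE(bool) iemOpcodeSketchNeedsSlowPath(PVMCPU pVCpu, uint8_t cbNeeded)
{
    return (uintptr_t)pVCpu->iem.s.offOpcode + cbNeeded > pVCpu->iem.s.cbOpcode;
}
#endif
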
2080#ifndef IEM_WITH_SETJMP
2081
2082/**
2083 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2084 *
2085 * @returns Strict VBox status code.
2086 * @param pVCpu The cross context virtual CPU structure of the
2087 * calling thread.
2088 * @param pb Where to return the opcode byte.
2089 */
2090DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2091{
2092 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2093 if (rcStrict == VINF_SUCCESS)
2094 {
2095 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2096 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2097 pVCpu->iem.s.offOpcode = offOpcode + 1;
2098 }
2099 else
2100 *pb = 0;
2101 return rcStrict;
2102}
2103
2104
2105/**
2106 * Fetches the next opcode byte.
2107 *
2108 * @returns Strict VBox status code.
2109 * @param pVCpu The cross context virtual CPU structure of the
2110 * calling thread.
2111 * @param pu8 Where to return the opcode byte.
2112 */
2113DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2114{
2115 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2116 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2117 {
2118 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2119 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2120 return VINF_SUCCESS;
2121 }
2122 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2123}
2124
2125#else /* IEM_WITH_SETJMP */
2126
2127/**
2128 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2129 *
2130 * @returns The opcode byte.
2131 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2132 */
2133DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2134{
2135# ifdef IEM_WITH_CODE_TLB
2136 uint8_t u8;
2137 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2138 return u8;
2139# else
2140 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2141 if (rcStrict == VINF_SUCCESS)
2142 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2143 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2144# endif
2145}
2146
2147
2148/**
2149 * Fetches the next opcode byte, longjmp on error.
2150 *
2151 * @returns The opcode byte.
2152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2153 */
2154DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2155{
2156# ifdef IEM_WITH_CODE_TLB
2157 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2158 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2159 if (RT_LIKELY( pbBuf != NULL
2160 && offBuf < pVCpu->iem.s.cbInstrBuf))
2161 {
2162 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2163 return pbBuf[offBuf];
2164 }
2165# else
2166 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2167 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2168 {
2169 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2170 return pVCpu->iem.s.abOpcode[offOpcode];
2171 }
2172# endif
2173 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2174}
2175
2176#endif /* IEM_WITH_SETJMP */
2177
2178/**
2179 * Fetches the next opcode byte, returns automatically on failure.
2180 *
2181 * @param a_pu8 Where to return the opcode byte.
2182 * @remark Implicitly references pVCpu.
2183 */
2184#ifndef IEM_WITH_SETJMP
2185# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2186 do \
2187 { \
2188 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2189 if (rcStrict2 == VINF_SUCCESS) \
2190 { /* likely */ } \
2191 else \
2192 return rcStrict2; \
2193 } while (0)
2194#else
2195# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2196#endif /* IEM_WITH_SETJMP */
2197
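/* Typical use of IEM_OPCODE_GET_NEXT_U8 in a decoder routine: the macro either
 * stores the byte and continues, or bails out (return / longjmp depending on
 * IEM_WITH_SETJMP).  Illustrative sketch only; the function name is made up and
 * the block is never compiled:
 */
#if 0
static VBOXSTRICTRC iemDecodeSketchImm8(PVMCPU pVCpu)
{
    uint8_t bImm;
    IEM_OPCODE_GET_NEXT_U8(&bImm);  /* may not return here on a fetch failure */
    /* ... decode using bImm ... */
    RT_NOREF(bImm);
    return VINF_SUCCESS;
}
#endif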
2198
2199#ifndef IEM_WITH_SETJMP
2200/**
2201 * Fetches the next signed byte from the opcode stream.
2202 *
2203 * @returns Strict VBox status code.
2204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2205 * @param pi8 Where to return the signed byte.
2206 */
2207DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2208{
2209 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2210}
2211#endif /* !IEM_WITH_SETJMP */
2212
2213
2214/**
2215 * Fetches the next signed byte from the opcode stream, returning automatically
2216 * on failure.
2217 *
2218 * @param a_pi8 Where to return the signed byte.
2219 * @remark Implicitly references pVCpu.
2220 */
2221#ifndef IEM_WITH_SETJMP
2222# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2223 do \
2224 { \
2225 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2226 if (rcStrict2 != VINF_SUCCESS) \
2227 return rcStrict2; \
2228 } while (0)
2229#else /* IEM_WITH_SETJMP */
2230# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2231
2232#endif /* IEM_WITH_SETJMP */
2233
2234#ifndef IEM_WITH_SETJMP
2235
2236/**
2237 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2238 *
2239 * @returns Strict VBox status code.
2240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2241 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
2242 * @param   pu16                Where to return the opcode word.
2242 */
2243DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2244{
2245 uint8_t u8;
2246 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2247 if (rcStrict == VINF_SUCCESS)
2248 *pu16 = (int8_t)u8;
2249 return rcStrict;
2250}
2251
2252
2253/**
2254 * Fetches the next signed byte from the opcode stream, extending it to
2255 * unsigned 16-bit.
2256 *
2257 * @returns Strict VBox status code.
2258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2259 * @param pu16 Where to return the unsigned word.
2260 */
2261DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2262{
2263 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2264 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2265 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2266
2267 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2268 pVCpu->iem.s.offOpcode = offOpcode + 1;
2269 return VINF_SUCCESS;
2270}
2271
2272#endif /* !IEM_WITH_SETJMP */
2273
2274/**
2275 * Fetches the next signed byte from the opcode stream, sign-extending it to
2276 * a word, and returns automatically on failure.
2277 *
2278 * @param a_pu16 Where to return the word.
2279 * @remark Implicitly references pVCpu.
2280 */
2281#ifndef IEM_WITH_SETJMP
2282# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2283 do \
2284 { \
2285 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2286 if (rcStrict2 != VINF_SUCCESS) \
2287 return rcStrict2; \
2288 } while (0)
2289#else
2290# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2291#endif
2292
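/* What the S8 -> U16 widening above amounts to, e.g. for the sign-extended imm8
 * forms (such as the 0x83 ALU group) with 16-bit operand size: 0xF0 (-16)
 * becomes 0xFFF0.  Sketch only (never compiled):
 */
#if 0
DECLINLINE(uint16_t) iemSketchS8SxU16(uint8_t bImm)
{
    return (uint16_t)(int16_t)(int8_t)bImm;
}
#endif
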
2293#ifndef IEM_WITH_SETJMP
2294
2295/**
2296 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2297 *
2298 * @returns Strict VBox status code.
2299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2300 * @param pu32 Where to return the opcode dword.
2301 */
2302DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2303{
2304 uint8_t u8;
2305 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2306 if (rcStrict == VINF_SUCCESS)
2307 *pu32 = (int8_t)u8;
2308 return rcStrict;
2309}
2310
2311
2312/**
2313 * Fetches the next signed byte from the opcode stream, extending it to
2314 * unsigned 32-bit.
2315 *
2316 * @returns Strict VBox status code.
2317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2318 * @param pu32 Where to return the unsigned dword.
2319 */
2320DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2321{
2322 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2323 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2324 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2325
2326 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2327 pVCpu->iem.s.offOpcode = offOpcode + 1;
2328 return VINF_SUCCESS;
2329}
2330
2331#endif /* !IEM_WITH_SETJMP */
2332
2333/**
2334 * Fetches the next signed byte from the opcode stream, sign-extending it to
2335 * a double word, and returns automatically on failure.
2336 *
2337 * @param   a_pu32              Where to return the double word.
2338 * @remark Implicitly references pVCpu.
2339 */
2340#ifndef IEM_WITH_SETJMP
2341# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2342 do \
2343 { \
2344 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2345 if (rcStrict2 != VINF_SUCCESS) \
2346 return rcStrict2; \
2347 } while (0)
2348#else
2349# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2350#endif
2351
2352#ifndef IEM_WITH_SETJMP
2353
2354/**
2355 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2356 *
2357 * @returns Strict VBox status code.
2358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2359 * @param pu64 Where to return the opcode qword.
2360 */
2361DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2362{
2363 uint8_t u8;
2364 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2365 if (rcStrict == VINF_SUCCESS)
2366 *pu64 = (int8_t)u8;
2367 return rcStrict;
2368}
2369
2370
2371/**
2372 * Fetches the next signed byte from the opcode stream, extending it to
2373 * unsigned 64-bit.
2374 *
2375 * @returns Strict VBox status code.
2376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2377 * @param pu64 Where to return the unsigned qword.
2378 */
2379DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2380{
2381 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2382 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2383 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2384
2385 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2386 pVCpu->iem.s.offOpcode = offOpcode + 1;
2387 return VINF_SUCCESS;
2388}
2389
2390#endif /* !IEM_WITH_SETJMP */
2391
2392
2393/**
2394 * Fetches the next signed byte from the opcode stream, sign-extending it to
2395 * a quad word, and returns automatically on failure.
2396 *
2397 * @param   a_pu64              Where to return the quad word.
2398 * @remark Implicitly references pVCpu.
2399 */
2400#ifndef IEM_WITH_SETJMP
2401# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2402 do \
2403 { \
2404 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2405 if (rcStrict2 != VINF_SUCCESS) \
2406 return rcStrict2; \
2407 } while (0)
2408#else
2409# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2410#endif
2411
2412
2413#ifndef IEM_WITH_SETJMP
2414
2415/**
2416 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2417 *
2418 * @returns Strict VBox status code.
2419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2420 * @param pu16 Where to return the opcode word.
2421 */
2422DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2423{
2424 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2425 if (rcStrict == VINF_SUCCESS)
2426 {
2427 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2428# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2429 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2430# else
2431 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2432# endif
2433 pVCpu->iem.s.offOpcode = offOpcode + 2;
2434 }
2435 else
2436 *pu16 = 0;
2437 return rcStrict;
2438}
2439
2440
2441/**
2442 * Fetches the next opcode word.
2443 *
2444 * @returns Strict VBox status code.
2445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2446 * @param pu16 Where to return the opcode word.
2447 */
2448DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2449{
2450 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2451 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2452 {
2453 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2454# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2455 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2456# else
2457 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2458# endif
2459 return VINF_SUCCESS;
2460 }
2461 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2462}
2463
2464#else /* IEM_WITH_SETJMP */
2465
2466/**
2467 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2468 *
2469 * @returns The opcode word.
2470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2471 */
2472DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2473{
2474# ifdef IEM_WITH_CODE_TLB
2475 uint16_t u16;
2476 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2477 return u16;
2478# else
2479 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2480 if (rcStrict == VINF_SUCCESS)
2481 {
2482 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2483 pVCpu->iem.s.offOpcode += 2;
2484# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2485 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2486# else
2487 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2488# endif
2489 }
2490 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2491# endif
2492}
2493
2494
2495/**
2496 * Fetches the next opcode word, longjmp on error.
2497 *
2498 * @returns The opcode word.
2499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2500 */
2501DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2502{
2503# ifdef IEM_WITH_CODE_TLB
2504 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2505 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2506 if (RT_LIKELY( pbBuf != NULL
2507 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2508 {
2509 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2510# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2511 return *(uint16_t const *)&pbBuf[offBuf];
2512# else
2513 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2514# endif
2515 }
2516# else
2517 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2518 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2519 {
2520 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2521# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2522 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2523# else
2524 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2525# endif
2526 }
2527# endif
2528 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2529}
2530
2531#endif /* IEM_WITH_SETJMP */
2532
2533
2534/**
2535 * Fetches the next opcode word, returns automatically on failure.
2536 *
2537 * @param a_pu16 Where to return the opcode word.
2538 * @remark Implicitly references pVCpu.
2539 */
2540#ifndef IEM_WITH_SETJMP
2541# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2542 do \
2543 { \
2544 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2545 if (rcStrict2 != VINF_SUCCESS) \
2546 return rcStrict2; \
2547 } while (0)
2548#else
2549# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2550#endif
2551
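/* Both the aligned and the IEM_USE_UNALIGNED_DATA_ACCESS paths above assemble
 * the value little endian, i.e. the byte at the lower address is the low byte:
 * opcode bytes 0x34 0x12 decode as 0x1234 either way.  Sketch only (never
 * compiled):
 */
#if 0
DECLINLINE(uint16_t) iemSketchU16ByteOrder(void)
{
    return RT_MAKE_U16(0x34, 0x12); /* == UINT16_C(0x1234) */
}
#endif
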
2552#ifndef IEM_WITH_SETJMP
2553
2554/**
2555 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2556 *
2557 * @returns Strict VBox status code.
2558 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2559 * @param pu32 Where to return the opcode double word.
2560 */
2561DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2562{
2563 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2564 if (rcStrict == VINF_SUCCESS)
2565 {
2566 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2567 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2568 pVCpu->iem.s.offOpcode = offOpcode + 2;
2569 }
2570 else
2571 *pu32 = 0;
2572 return rcStrict;
2573}
2574
2575
2576/**
2577 * Fetches the next opcode word, zero extending it to a double word.
2578 *
2579 * @returns Strict VBox status code.
2580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2581 * @param pu32 Where to return the opcode double word.
2582 */
2583DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2584{
2585 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2586 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2587 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2588
2589 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2590 pVCpu->iem.s.offOpcode = offOpcode + 2;
2591 return VINF_SUCCESS;
2592}
2593
2594#endif /* !IEM_WITH_SETJMP */
2595
2596
2597/**
2598 * Fetches the next opcode word and zero extends it to a double word, returns
2599 * automatically on failure.
2600 *
2601 * @param a_pu32 Where to return the opcode double word.
2602 * @remark Implicitly references pVCpu.
2603 */
2604#ifndef IEM_WITH_SETJMP
2605# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2606 do \
2607 { \
2608 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2609 if (rcStrict2 != VINF_SUCCESS) \
2610 return rcStrict2; \
2611 } while (0)
2612#else
2613# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2614#endif
2615
2616#ifndef IEM_WITH_SETJMP
2617
2618/**
2619 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2620 *
2621 * @returns Strict VBox status code.
2622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2623 * @param pu64 Where to return the opcode quad word.
2624 */
2625DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2626{
2627 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2628 if (rcStrict == VINF_SUCCESS)
2629 {
2630 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2631 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2632 pVCpu->iem.s.offOpcode = offOpcode + 2;
2633 }
2634 else
2635 *pu64 = 0;
2636 return rcStrict;
2637}
2638
2639
2640/**
2641 * Fetches the next opcode word, zero extending it to a quad word.
2642 *
2643 * @returns Strict VBox status code.
2644 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2645 * @param pu64 Where to return the opcode quad word.
2646 */
2647DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2648{
2649 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2650 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2651 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2652
2653 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2654 pVCpu->iem.s.offOpcode = offOpcode + 2;
2655 return VINF_SUCCESS;
2656}
2657
2658#endif /* !IEM_WITH_SETJMP */
2659
2660/**
2661 * Fetches the next opcode word and zero extends it to a quad word, returns
2662 * automatically on failure.
2663 *
2664 * @param a_pu64 Where to return the opcode quad word.
2665 * @remark Implicitly references pVCpu.
2666 */
2667#ifndef IEM_WITH_SETJMP
2668# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2669 do \
2670 { \
2671 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2672 if (rcStrict2 != VINF_SUCCESS) \
2673 return rcStrict2; \
2674 } while (0)
2675#else
2676# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2677#endif
2678
2679
2680#ifndef IEM_WITH_SETJMP
2681/**
2682 * Fetches the next signed word from the opcode stream.
2683 *
2684 * @returns Strict VBox status code.
2685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2686 * @param pi16 Where to return the signed word.
2687 */
2688DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2689{
2690 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2691}
2692#endif /* !IEM_WITH_SETJMP */
2693
2694
2695/**
2696 * Fetches the next signed word from the opcode stream, returning automatically
2697 * on failure.
2698 *
2699 * @param a_pi16 Where to return the signed word.
2700 * @remark Implicitly references pVCpu.
2701 */
2702#ifndef IEM_WITH_SETJMP
2703# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2704 do \
2705 { \
2706 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2707 if (rcStrict2 != VINF_SUCCESS) \
2708 return rcStrict2; \
2709 } while (0)
2710#else
2711# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2712#endif
2713
2714#ifndef IEM_WITH_SETJMP
2715
2716/**
2717 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2718 *
2719 * @returns Strict VBox status code.
2720 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2721 * @param pu32 Where to return the opcode dword.
2722 */
2723DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2724{
2725 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2726 if (rcStrict == VINF_SUCCESS)
2727 {
2728 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2729# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2730 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2731# else
2732 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2733 pVCpu->iem.s.abOpcode[offOpcode + 1],
2734 pVCpu->iem.s.abOpcode[offOpcode + 2],
2735 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2736# endif
2737 pVCpu->iem.s.offOpcode = offOpcode + 4;
2738 }
2739 else
2740 *pu32 = 0;
2741 return rcStrict;
2742}
2743
2744
2745/**
2746 * Fetches the next opcode dword.
2747 *
2748 * @returns Strict VBox status code.
2749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2750 * @param pu32 Where to return the opcode double word.
2751 */
2752DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2753{
2754 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2755 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2756 {
2757 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2758# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2759 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2760# else
2761 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2762 pVCpu->iem.s.abOpcode[offOpcode + 1],
2763 pVCpu->iem.s.abOpcode[offOpcode + 2],
2764 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2765# endif
2766 return VINF_SUCCESS;
2767 }
2768 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2769}
2770
2771#else  /* IEM_WITH_SETJMP */
2772
2773/**
2774 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2775 *
2776 * @returns The opcode dword.
2777 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2778 */
2779DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2780{
2781# ifdef IEM_WITH_CODE_TLB
2782 uint32_t u32;
2783 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2784 return u32;
2785# else
2786 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2787 if (rcStrict == VINF_SUCCESS)
2788 {
2789 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2790 pVCpu->iem.s.offOpcode = offOpcode + 4;
2791# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2792 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2793# else
2794 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2795 pVCpu->iem.s.abOpcode[offOpcode + 1],
2796 pVCpu->iem.s.abOpcode[offOpcode + 2],
2797 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2798# endif
2799 }
2800 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2801# endif
2802}
2803
2804
2805/**
2806 * Fetches the next opcode dword, longjmp on error.
2807 *
2808 * @returns The opcode dword.
2809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2810 */
2811DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2812{
2813# ifdef IEM_WITH_CODE_TLB
2814 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2815 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2816 if (RT_LIKELY( pbBuf != NULL
2817 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2818 {
2819 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2820# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2821 return *(uint32_t const *)&pbBuf[offBuf];
2822# else
2823 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2824 pbBuf[offBuf + 1],
2825 pbBuf[offBuf + 2],
2826 pbBuf[offBuf + 3]);
2827# endif
2828 }
2829# else
2830 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2831 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2832 {
2833 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2834# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2835 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2836# else
2837 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2838 pVCpu->iem.s.abOpcode[offOpcode + 1],
2839 pVCpu->iem.s.abOpcode[offOpcode + 2],
2840 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2841# endif
2842 }
2843# endif
2844 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2845}
2846
2847#endif /* IEM_WITH_SETJMP */
2848
2849
2850/**
2851 * Fetches the next opcode dword, returns automatically on failure.
2852 *
2853 * @param a_pu32 Where to return the opcode dword.
2854 * @remark Implicitly references pVCpu.
2855 */
2856#ifndef IEM_WITH_SETJMP
2857# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2858 do \
2859 { \
2860 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2861 if (rcStrict2 != VINF_SUCCESS) \
2862 return rcStrict2; \
2863 } while (0)
2864#else
2865# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2866#endif
2867
2868#ifndef IEM_WITH_SETJMP
2869
2870/**
2871 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2872 *
2873 * @returns Strict VBox status code.
2874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2875 * @param   pu64                Where to return the opcode quad word.
2876 */
2877DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2878{
2879 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2880 if (rcStrict == VINF_SUCCESS)
2881 {
2882 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2883 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2884 pVCpu->iem.s.abOpcode[offOpcode + 1],
2885 pVCpu->iem.s.abOpcode[offOpcode + 2],
2886 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2887 pVCpu->iem.s.offOpcode = offOpcode + 4;
2888 }
2889 else
2890 *pu64 = 0;
2891 return rcStrict;
2892}
2893
2894
2895/**
2896 * Fetches the next opcode dword, zero extending it to a quad word.
2897 *
2898 * @returns Strict VBox status code.
2899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2900 * @param pu64 Where to return the opcode quad word.
2901 */
2902DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2903{
2904 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2905 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2906 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2907
2908 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2909 pVCpu->iem.s.abOpcode[offOpcode + 1],
2910 pVCpu->iem.s.abOpcode[offOpcode + 2],
2911 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2912 pVCpu->iem.s.offOpcode = offOpcode + 4;
2913 return VINF_SUCCESS;
2914}
2915
2916#endif /* !IEM_WITH_SETJMP */
2917
2918
2919/**
2920 * Fetches the next opcode dword and zero extends it to a quad word, returns
2921 * automatically on failure.
2922 *
2923 * @param a_pu64 Where to return the opcode quad word.
2924 * @remark Implicitly references pVCpu.
2925 */
2926#ifndef IEM_WITH_SETJMP
2927# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2928 do \
2929 { \
2930 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2931 if (rcStrict2 != VINF_SUCCESS) \
2932 return rcStrict2; \
2933 } while (0)
2934#else
2935# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2936#endif
2937
2938
2939#ifndef IEM_WITH_SETJMP
2940/**
2941 * Fetches the next signed double word from the opcode stream.
2942 *
2943 * @returns Strict VBox status code.
2944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2945 * @param pi32 Where to return the signed double word.
2946 */
2947DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2948{
2949 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2950}
2951#endif
2952
2953/**
2954 * Fetches the next signed double word from the opcode stream, returning
2955 * automatically on failure.
2956 *
2957 * @param a_pi32 Where to return the signed double word.
2958 * @remark Implicitly references pVCpu.
2959 */
2960#ifndef IEM_WITH_SETJMP
2961# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2962 do \
2963 { \
2964 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2965 if (rcStrict2 != VINF_SUCCESS) \
2966 return rcStrict2; \
2967 } while (0)
2968#else
2969# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2970#endif
2971
2972#ifndef IEM_WITH_SETJMP
2973
2974/**
2975 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2976 *
2977 * @returns Strict VBox status code.
2978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2979 * @param pu64 Where to return the opcode qword.
2980 */
2981DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2982{
2983 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2984 if (rcStrict == VINF_SUCCESS)
2985 {
2986 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2987 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2988 pVCpu->iem.s.abOpcode[offOpcode + 1],
2989 pVCpu->iem.s.abOpcode[offOpcode + 2],
2990 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2991 pVCpu->iem.s.offOpcode = offOpcode + 4;
2992 }
2993 else
2994 *pu64 = 0;
2995 return rcStrict;
2996}
2997
2998
2999/**
3000 * Fetches the next opcode dword, sign extending it into a quad word.
3001 *
3002 * @returns Strict VBox status code.
3003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3004 * @param pu64 Where to return the opcode quad word.
3005 */
3006DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3007{
3008 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3009 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3010 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3011
3012 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3013 pVCpu->iem.s.abOpcode[offOpcode + 1],
3014 pVCpu->iem.s.abOpcode[offOpcode + 2],
3015 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3016 *pu64 = i32;
3017 pVCpu->iem.s.offOpcode = offOpcode + 4;
3018 return VINF_SUCCESS;
3019}
3020
3021#endif /* !IEM_WITH_SETJMP */
3022
3023
3024/**
3025 * Fetches the next opcode double word and sign extends it to a quad word,
3026 * returns automatically on failure.
3027 *
3028 * @param a_pu64 Where to return the opcode quad word.
3029 * @remark Implicitly references pVCpu.
3030 */
3031#ifndef IEM_WITH_SETJMP
3032# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3033 do \
3034 { \
3035 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3036 if (rcStrict2 != VINF_SUCCESS) \
3037 return rcStrict2; \
3038 } while (0)
3039#else
3040# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3041#endif
3042
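/* The S32 -> U64 widening above in concrete terms, e.g. for a negative
 * displacement or immediate in 64-bit mode: 0x80000000 becomes
 * 0xFFFFFFFF80000000.  Sketch only (never compiled):
 */
#if 0
DECLINLINE(uint64_t) iemSketchS32SxU64(uint32_t u32)
{
    return (uint64_t)(int64_t)(int32_t)u32;
}
#endif
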
3043#ifndef IEM_WITH_SETJMP
3044
3045/**
3046 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3047 *
3048 * @returns Strict VBox status code.
3049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3050 * @param pu64 Where to return the opcode qword.
3051 */
3052DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3053{
3054 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3055 if (rcStrict == VINF_SUCCESS)
3056 {
3057 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3058# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3059 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3060# else
3061 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3062 pVCpu->iem.s.abOpcode[offOpcode + 1],
3063 pVCpu->iem.s.abOpcode[offOpcode + 2],
3064 pVCpu->iem.s.abOpcode[offOpcode + 3],
3065 pVCpu->iem.s.abOpcode[offOpcode + 4],
3066 pVCpu->iem.s.abOpcode[offOpcode + 5],
3067 pVCpu->iem.s.abOpcode[offOpcode + 6],
3068 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3069# endif
3070 pVCpu->iem.s.offOpcode = offOpcode + 8;
3071 }
3072 else
3073 *pu64 = 0;
3074 return rcStrict;
3075}
3076
3077
3078/**
3079 * Fetches the next opcode qword.
3080 *
3081 * @returns Strict VBox status code.
3082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3083 * @param pu64 Where to return the opcode qword.
3084 */
3085DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3086{
3087 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3088 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3089 {
3090# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3091 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3092# else
3093 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3094 pVCpu->iem.s.abOpcode[offOpcode + 1],
3095 pVCpu->iem.s.abOpcode[offOpcode + 2],
3096 pVCpu->iem.s.abOpcode[offOpcode + 3],
3097 pVCpu->iem.s.abOpcode[offOpcode + 4],
3098 pVCpu->iem.s.abOpcode[offOpcode + 5],
3099 pVCpu->iem.s.abOpcode[offOpcode + 6],
3100 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3101# endif
3102 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3103 return VINF_SUCCESS;
3104 }
3105 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3106}
3107
3108#else /* IEM_WITH_SETJMP */
3109
3110/**
3111 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3112 *
3113 * @returns The opcode qword.
3114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3115 */
3116DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3117{
3118# ifdef IEM_WITH_CODE_TLB
3119 uint64_t u64;
3120 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3121 return u64;
3122# else
3123 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3124 if (rcStrict == VINF_SUCCESS)
3125 {
3126 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3127 pVCpu->iem.s.offOpcode = offOpcode + 8;
3128# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3129 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3130# else
3131 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3132 pVCpu->iem.s.abOpcode[offOpcode + 1],
3133 pVCpu->iem.s.abOpcode[offOpcode + 2],
3134 pVCpu->iem.s.abOpcode[offOpcode + 3],
3135 pVCpu->iem.s.abOpcode[offOpcode + 4],
3136 pVCpu->iem.s.abOpcode[offOpcode + 5],
3137 pVCpu->iem.s.abOpcode[offOpcode + 6],
3138 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3139# endif
3140 }
3141 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3142# endif
3143}
3144
3145
3146/**
3147 * Fetches the next opcode qword, longjmp on error.
3148 *
3149 * @returns The opcode qword.
3150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3151 */
3152DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3153{
3154# ifdef IEM_WITH_CODE_TLB
3155 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3156 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3157 if (RT_LIKELY( pbBuf != NULL
3158 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3159 {
3160 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3161# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3162 return *(uint64_t const *)&pbBuf[offBuf];
3163# else
3164 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3165 pbBuf[offBuf + 1],
3166 pbBuf[offBuf + 2],
3167 pbBuf[offBuf + 3],
3168 pbBuf[offBuf + 4],
3169 pbBuf[offBuf + 5],
3170 pbBuf[offBuf + 6],
3171 pbBuf[offBuf + 7]);
3172# endif
3173 }
3174# else
3175 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3176 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3177 {
3178 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3179# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3180 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3181# else
3182 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3183 pVCpu->iem.s.abOpcode[offOpcode + 1],
3184 pVCpu->iem.s.abOpcode[offOpcode + 2],
3185 pVCpu->iem.s.abOpcode[offOpcode + 3],
3186 pVCpu->iem.s.abOpcode[offOpcode + 4],
3187 pVCpu->iem.s.abOpcode[offOpcode + 5],
3188 pVCpu->iem.s.abOpcode[offOpcode + 6],
3189 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3190# endif
3191 }
3192# endif
3193 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3194}
3195
3196#endif /* IEM_WITH_SETJMP */
3197
3198/**
3199 * Fetches the next opcode quad word, returns automatically on failure.
3200 *
3201 * @param a_pu64 Where to return the opcode quad word.
3202 * @remark Implicitly references pVCpu.
3203 */
3204#ifndef IEM_WITH_SETJMP
3205# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3206 do \
3207 { \
3208 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3209 if (rcStrict2 != VINF_SUCCESS) \
3210 return rcStrict2; \
3211 } while (0)
3212#else
3213# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3214#endif
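/* Usage sketch (illustrative only): a decoder function fetches a qword
 * immediate through the macro so that both build flavours behave correctly:
 *
 *     uint64_t u64Imm;
 *     IEM_OPCODE_GET_NEXT_U64(&u64Imm);
 *
 * In the IEM_WITH_SETJMP build this expands to iemOpcodeGetNextU64Jmp(), which
 * longjmps on fetch failure; otherwise it returns the strict status code from
 * the calling function. The slow paths above refill the opcode buffer when
 * fewer than 8 bytes remain. */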
3215
3216
3217/** @name Misc Worker Functions.
3218 * @{
3219 */
3220
3221/**
3222 * Gets the exception class for the specified exception vector.
3223 *
3224 * @returns The class of the specified exception.
3225 * @param uVector The exception vector.
3226 */
3227IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3228{
3229 Assert(uVector <= X86_XCPT_LAST);
3230 switch (uVector)
3231 {
3232 case X86_XCPT_DE:
3233 case X86_XCPT_TS:
3234 case X86_XCPT_NP:
3235 case X86_XCPT_SS:
3236 case X86_XCPT_GP:
3237 case X86_XCPT_SX: /* AMD only */
3238 return IEMXCPTCLASS_CONTRIBUTORY;
3239
3240 case X86_XCPT_PF:
3241 case X86_XCPT_VE: /* Intel only */
3242 return IEMXCPTCLASS_PAGE_FAULT;
3243
3244 case X86_XCPT_DF:
3245 return IEMXCPTCLASS_DOUBLE_FAULT;
3246 }
3247 return IEMXCPTCLASS_BENIGN;
3248}
3249
3250
3251/**
3252 * Evaluates how to handle an exception caused during delivery of another event
3253 * (exception / interrupt).
3254 *
3255 * @returns How to handle the recursive exception.
3256 * @param pVCpu The cross context virtual CPU structure of the
3257 * calling thread.
3258 * @param fPrevFlags The flags of the previous event.
3259 * @param uPrevVector The vector of the previous event.
3260 * @param fCurFlags The flags of the current exception.
3261 * @param uCurVector The vector of the current exception.
3262 * @param pfXcptRaiseInfo Where to store additional information about the
3263 * exception condition. Optional.
3264 */
3265VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3266 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3267{
3268 /*
3269 * Only CPU exceptions can be raised while delivering other events; software interrupt
3270 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3271 */
3272 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3273 Assert(pVCpu); RT_NOREF(pVCpu);
3274 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3275
3276 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3277 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3278 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3279 {
3280 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3281 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3282 {
3283 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3284 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3285 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3286 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3287 {
3288 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3289 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3290 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3291 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3292 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3293 }
3294 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3295 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3296 {
3297 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3298 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3299 }
3300 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3301 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3302 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3303 {
3304 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3305 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3306 }
3307 }
3308 else
3309 {
3310 if (uPrevVector == X86_XCPT_NMI)
3311 {
3312 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3313 if (uCurVector == X86_XCPT_PF)
3314 {
3315 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3316 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3317 }
3318 }
3319 else if ( uPrevVector == X86_XCPT_AC
3320 && uCurVector == X86_XCPT_AC)
3321 {
3322 enmRaise = IEMXCPTRAISE_CPU_HANG;
3323 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3324 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3325 }
3326 }
3327 }
3328 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3329 {
3330 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3331 if (uCurVector == X86_XCPT_PF)
3332 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3333 }
3334 else
3335 {
3336 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3337 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3338 }
3339
3340 if (pfXcptRaiseInfo)
3341 *pfXcptRaiseInfo = fRaiseInfo;
3342 return enmRaise;
3343}
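/* Quick reference for the combination rules implemented above (cf. the
 * Intel/AMD double-fault documentation):
 *
 *    previous \ current : benign      contributory   page fault
 *    benign             : current     current        current
 *    contributory       : current     #DF            current
 *    page fault         : current     #DF            #DF
 *    double fault       : current     triple fault   triple fault
 *
 * where "current" means the second exception is delivered normally
 * (IEMXCPTRAISE_CURRENT_XCPT), possibly with extra info flags for the NMI,
 * external and software interrupt cases; a recursive #AC is treated as a
 * CPU hang instead. */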
3344
3345
3346/**
3347 * Enters the CPU shutdown state initiated by a triple fault or other
3348 * unrecoverable conditions.
3349 *
3350 * @returns Strict VBox status code.
3351 * @param pVCpu The cross context virtual CPU structure of the
3352 * calling thread.
3353 */
3354IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3355{
3356 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3357 {
3358 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3359 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3360 }
3361
3362 RT_NOREF(pVCpu);
3363 return VINF_EM_TRIPLE_FAULT;
3364}
3365
3366
3367/**
3368 * Validates a new SS segment.
3369 *
3370 * @returns VBox strict status code.
3371 * @param pVCpu The cross context virtual CPU structure of the
3372 * calling thread.
3373 * @param pCtx The CPU context.
3374 * @param NewSS The new SS selector.
3375 * @param uCpl The CPL to load the stack for.
3376 * @param pDesc Where to return the descriptor.
3377 */
3378IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3379{
3380 NOREF(pCtx);
3381
3382 /* Null selectors are not allowed (we're not called for dispatching
3383 interrupts with SS=0 in long mode). */
3384 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3385 {
3386 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3387 return iemRaiseTaskSwitchFault0(pVCpu);
3388 }
3389
3390 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3391 if ((NewSS & X86_SEL_RPL) != uCpl)
3392 {
3393 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
3394 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3395 }
3396
3397 /*
3398 * Read the descriptor.
3399 */
3400 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3401 if (rcStrict != VINF_SUCCESS)
3402 return rcStrict;
3403
3404 /*
3405 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3406 */
3407 if (!pDesc->Legacy.Gen.u1DescType)
3408 {
3409 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3410 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3411 }
3412
3413 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3414 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3415 {
3416 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3417 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3418 }
3419 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3420 {
3421 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3422 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3423 }
3424
3425 /* Is it there? */
3426 /** @todo testcase: Is this checked before the canonical / limit check below? */
3427 if (!pDesc->Legacy.Gen.u1Present)
3428 {
3429 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3430 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3431 }
3432
3433 return VINF_SUCCESS;
3434}
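/* Summary of the checks above, in the order performed: null selector -> #TS(0),
 * RPL != CPL -> #TS(sel), descriptor fetch, system descriptor -> #TS(sel),
 * code or non-writable segment -> #TS(sel), DPL != CPL -> #TS(sel), and
 * finally not-present -> #NP(sel). */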
3435
3436
3437/**
3438 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3439 * not.
3440 *
3441 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3442 * @param a_pCtx The CPU context.
3443 */
3444#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3445# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3446 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3447 ? (a_pCtx)->eflags.u \
3448 : CPUMRawGetEFlags(a_pVCpu) )
3449#else
3450# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3451 ( (a_pCtx)->eflags.u )
3452#endif
3453
3454/**
3455 * Updates the EFLAGS in the correct manner wrt. PATM.
3456 *
3457 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3458 * @param a_pCtx The CPU context.
3459 * @param a_fEfl The new EFLAGS.
3460 */
3461#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3462# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3463 do { \
3464 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3465 (a_pCtx)->eflags.u = (a_fEfl); \
3466 else \
3467 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3468 } while (0)
3469#else
3470# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3471 do { \
3472 (a_pCtx)->eflags.u = (a_fEfl); \
3473 } while (0)
3474#endif
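/* These macros are used instead of touching pCtx->eflags.u directly because in
 * raw-mode (PATM) builds parts of EFLAGS are maintained by CPUM. A typical
 * read-modify-write therefore looks like:
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *     fEfl &= ~X86_EFL_IF;
 *     IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 *
 * which goes through CPUMRawGetEFlags/CPUMRawSetEFlags when needed and is a
 * plain context access otherwise. */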
3475
3476
3477/** @} */
3478
3479/** @name Raising Exceptions.
3480 *
3481 * @{
3482 */
3483
3484
3485/**
3486 * Loads the specified stack far pointer from the TSS.
3487 *
3488 * @returns VBox strict status code.
3489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3490 * @param pCtx The CPU context.
3491 * @param uCpl The CPL to load the stack for.
3492 * @param pSelSS Where to return the new stack segment.
3493 * @param puEsp Where to return the new stack pointer.
3494 */
3495IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3496 PRTSEL pSelSS, uint32_t *puEsp)
3497{
3498 VBOXSTRICTRC rcStrict;
3499 Assert(uCpl < 4);
3500
3501 switch (pCtx->tr.Attr.n.u4Type)
3502 {
3503 /*
3504 * 16-bit TSS (X86TSS16).
3505 */
3506 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3507 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3508 {
3509 uint32_t off = uCpl * 4 + 2;
3510 if (off + 4 <= pCtx->tr.u32Limit)
3511 {
3512 /** @todo check actual access pattern here. */
3513 uint32_t u32Tmp = 0; /* gcc maybe... */
3514 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3515 if (rcStrict == VINF_SUCCESS)
3516 {
3517 *puEsp = RT_LOWORD(u32Tmp);
3518 *pSelSS = RT_HIWORD(u32Tmp);
3519 return VINF_SUCCESS;
3520 }
3521 }
3522 else
3523 {
3524 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3525 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3526 }
3527 break;
3528 }
3529
3530 /*
3531 * 32-bit TSS (X86TSS32).
3532 */
3533 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3534 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3535 {
3536 uint32_t off = uCpl * 8 + 4;
3537 if (off + 7 <= pCtx->tr.u32Limit)
3538 {
3539/** @todo check actual access pattern here. */
3540 uint64_t u64Tmp;
3541 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3542 if (rcStrict == VINF_SUCCESS)
3543 {
3544 *puEsp = u64Tmp & UINT32_MAX;
3545 *pSelSS = (RTSEL)(u64Tmp >> 32);
3546 return VINF_SUCCESS;
3547 }
3548 }
3549 else
3550 {
3551 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3552 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3553 }
3554 break;
3555 }
3556
3557 default:
3558 AssertFailed();
3559 rcStrict = VERR_IEM_IPE_4;
3560 break;
3561 }
3562
3563 *puEsp = 0; /* make gcc happy */
3564 *pSelSS = 0; /* make gcc happy */
3565 return rcStrict;
3566}
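/* Worked example of the offsets used above: for uCpl == 1 the 16-bit TSS holds
 * the {sp1, ss1} pair at offset 1*4 + 2 = 6 (X86TSS16), while the 32-bit TSS
 * holds {esp1, ss1} at offset 1*8 + 4 = 12 (X86TSS32); both reads must lie
 * entirely within TR.limit or a #TS is raised. */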
3567
3568
3569/**
3570 * Loads the specified stack pointer from the 64-bit TSS.
3571 *
3572 * @returns VBox strict status code.
3573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3574 * @param pCtx The CPU context.
3575 * @param uCpl The CPL to load the stack for.
3576 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3577 * @param puRsp Where to return the new stack pointer.
3578 */
3579IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3580{
3581 Assert(uCpl < 4);
3582 Assert(uIst < 8);
3583 *puRsp = 0; /* make gcc happy */
3584
3585 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3586
3587 uint32_t off;
3588 if (uIst)
3589 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3590 else
3591 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3592 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3593 {
3594 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3595 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3596 }
3597
3598 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3599}
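/* Worked example: with uIst == 0 and uCpl == 2 the qword read above comes from
 * offsetof(X86TSS64, rsp0) + 2*8, i.e. the rsp2 field; with uIst == 3 it comes
 * from offsetof(X86TSS64, ist1) + 2*8, i.e. the ist3 field. */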
3600
3601
3602/**
3603 * Adjusts the CPU state according to the exception being raised.
3604 *
3605 * @param pCtx The CPU context.
3606 * @param u8Vector The exception that has been raised.
3607 */
3608DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3609{
3610 switch (u8Vector)
3611 {
3612 case X86_XCPT_DB:
3613 pCtx->dr[7] &= ~X86_DR7_GD;
3614 break;
3615 /** @todo Read the AMD and Intel exception reference... */
3616 }
3617}
3618
3619
3620/**
3621 * Implements exceptions and interrupts for real mode.
3622 *
3623 * @returns VBox strict status code.
3624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3625 * @param pCtx The CPU context.
3626 * @param cbInstr The number of bytes to offset rIP by in the return
3627 * address.
3628 * @param u8Vector The interrupt / exception vector number.
3629 * @param fFlags The flags.
3630 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3631 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3632 */
3633IEM_STATIC VBOXSTRICTRC
3634iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3635 PCPUMCTX pCtx,
3636 uint8_t cbInstr,
3637 uint8_t u8Vector,
3638 uint32_t fFlags,
3639 uint16_t uErr,
3640 uint64_t uCr2)
3641{
3642 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3643 NOREF(uErr); NOREF(uCr2);
3644
3645 /*
3646 * Read the IDT entry.
3647 */
3648 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3649 {
3650 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3651 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3652 }
3653 RTFAR16 Idte;
3654 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3655 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3656 return rcStrict;
3657
3658 /*
3659 * Push the stack frame.
3660 */
3661 uint16_t *pu16Frame;
3662 uint64_t uNewRsp;
3663 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3664 if (rcStrict != VINF_SUCCESS)
3665 return rcStrict;
3666
3667 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3668#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3669 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3670 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3671 fEfl |= UINT16_C(0xf000);
3672#endif
3673 pu16Frame[2] = (uint16_t)fEfl;
3674 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3675 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3676 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3677 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3678 return rcStrict;
3679
3680 /*
3681 * Load the vector address into cs:ip and make exception specific state
3682 * adjustments.
3683 */
3684 pCtx->cs.Sel = Idte.sel;
3685 pCtx->cs.ValidSel = Idte.sel;
3686 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3687 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3688 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3689 pCtx->rip = Idte.off;
3690 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3691 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3692
3693 /** @todo do we actually do this in real mode? */
3694 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3695 iemRaiseXcptAdjustState(pCtx, u8Vector);
3696
3697 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3698}
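/* Real-mode delivery in a nutshell (what the code above does): the IDT is a
 * flat array of RTFAR16 entries, so the vector's entry sits at IDTR.base +
 * vector*4; the 6-byte frame pushed is, from higher to lower addresses,
 * FLAGS, CS and IP (IP biased by cbInstr for software interrupts); IF, TF and
 * AC are then cleared before jumping to the handler. */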
3699
3700
3701/**
3702 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3703 *
3704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3705 * @param pSReg Pointer to the segment register.
3706 */
3707IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3708{
3709 pSReg->Sel = 0;
3710 pSReg->ValidSel = 0;
3711 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3712 {
3713 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3714 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3715 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3716 }
3717 else
3718 {
3719 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3720 /** @todo check this on AMD-V */
3721 pSReg->u64Base = 0;
3722 pSReg->u32Limit = 0;
3723 }
3724}
3725
3726
3727/**
3728 * Loads a segment selector during a task switch in V8086 mode.
3729 *
3730 * @param pSReg Pointer to the segment register.
3731 * @param uSel The selector value to load.
3732 */
3733IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3734{
3735 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3736 pSReg->Sel = uSel;
3737 pSReg->ValidSel = uSel;
3738 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3739 pSReg->u64Base = uSel << 4;
3740 pSReg->u32Limit = 0xffff;
3741 pSReg->Attr.u = 0xf3;
3742}
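/* Example: loading uSel = 0x1234 in V8086 mode yields base 0x12340,
 * limit 0xffff and attributes 0xf3 (present, DPL=3, accessed read/write data),
 * matching the VMX guest-segment checks referenced above. */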
3743
3744
3745/**
3746 * Loads a NULL data selector into a selector register, both the hidden and
3747 * visible parts, in protected mode.
3748 *
3749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3750 * @param pSReg Pointer to the segment register.
3751 * @param uRpl The RPL.
3752 */
3753IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3754{
3755 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3756 * data selector in protected mode. */
3757 pSReg->Sel = uRpl;
3758 pSReg->ValidSel = uRpl;
3759 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3760 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3761 {
3762 /* VT-x (Intel 3960x) observed doing something like this. */
3763 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3764 pSReg->u32Limit = UINT32_MAX;
3765 pSReg->u64Base = 0;
3766 }
3767 else
3768 {
3769 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3770 pSReg->u32Limit = 0;
3771 pSReg->u64Base = 0;
3772 }
3773}
3774
3775
3776/**
3777 * Loads a segment selector during a task switch in protected mode.
3778 *
3779 * In this task switch scenario, we would throw \#TS exceptions rather than
3780 * \#GPs.
3781 *
3782 * @returns VBox strict status code.
3783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3784 * @param pSReg Pointer to the segment register.
3785 * @param uSel The new selector value.
3786 *
3787 * @remarks This does _not_ handle CS or SS.
3788 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3789 */
3790IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3791{
3792 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3793
3794 /* Null data selector. */
3795 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3796 {
3797 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3798 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3799 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3800 return VINF_SUCCESS;
3801 }
3802
3803 /* Fetch the descriptor. */
3804 IEMSELDESC Desc;
3805 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3806 if (rcStrict != VINF_SUCCESS)
3807 {
3808 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3809 VBOXSTRICTRC_VAL(rcStrict)));
3810 return rcStrict;
3811 }
3812
3813 /* Must be a data segment or readable code segment. */
3814 if ( !Desc.Legacy.Gen.u1DescType
3815 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3816 {
3817 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3818 Desc.Legacy.Gen.u4Type));
3819 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3820 }
3821
3822 /* Check privileges for data segments and non-conforming code segments. */
3823 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3824 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3825 {
3826 /* The RPL and the new CPL must be less than or equal to the DPL. */
3827 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3828 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3829 {
3830 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3831 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3832 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3833 }
3834 }
3835
3836 /* Is it there? */
3837 if (!Desc.Legacy.Gen.u1Present)
3838 {
3839 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3840 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3841 }
3842
3843 /* The base and limit. */
3844 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3845 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3846
3847 /*
3848 * Ok, everything checked out fine. Now set the accessed bit before
3849 * committing the result into the registers.
3850 */
3851 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3852 {
3853 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3854 if (rcStrict != VINF_SUCCESS)
3855 return rcStrict;
3856 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3857 }
3858
3859 /* Commit */
3860 pSReg->Sel = uSel;
3861 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3862 pSReg->u32Limit = cbLimit;
3863 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3864 pSReg->ValidSel = uSel;
3865 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3866 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3867 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3868
3869 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3870 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3871 return VINF_SUCCESS;
3872}
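/* Note: unlike a normal MOV Sreg load, every failure above raises #TS (or #NP
 * for a not-present segment) with the selector as error code, because we are
 * in the middle of a task switch; see the function remarks. */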
3873
3874
3875/**
3876 * Performs a task switch.
3877 *
3878 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3879 * caller is responsible for performing the necessary checks (like DPL, TSS
3880 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3881 * reference for JMP, CALL, IRET.
3882 *
3883 * If the task switch is due to a software interrupt or hardware exception,
3884 * the caller is responsible for validating the TSS selector and descriptor. See
3885 * Intel Instruction reference for INT n.
3886 *
3887 * @returns VBox strict status code.
3888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3889 * @param pCtx The CPU context.
3890 * @param enmTaskSwitch What caused this task switch.
3891 * @param uNextEip The EIP effective after the task switch.
3892 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3893 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3894 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3895 * @param SelTSS The TSS selector of the new task.
3896 * @param pNewDescTSS Pointer to the new TSS descriptor.
3897 */
3898IEM_STATIC VBOXSTRICTRC
3899iemTaskSwitch(PVMCPU pVCpu,
3900 PCPUMCTX pCtx,
3901 IEMTASKSWITCH enmTaskSwitch,
3902 uint32_t uNextEip,
3903 uint32_t fFlags,
3904 uint16_t uErr,
3905 uint64_t uCr2,
3906 RTSEL SelTSS,
3907 PIEMSELDESC pNewDescTSS)
3908{
3909 Assert(!IEM_IS_REAL_MODE(pVCpu));
3910 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3911
3912 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3913 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3914 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3915 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3916 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3917
3918 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3919 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3920
3921 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3922 fIsNewTSS386, pCtx->eip, uNextEip));
3923
3924 /* Update CR2 in case it's a page-fault. */
3925 /** @todo This should probably be done much earlier in IEM/PGM. See
3926 * @bugref{5653#c49}. */
3927 if (fFlags & IEM_XCPT_FLAGS_CR2)
3928 pCtx->cr2 = uCr2;
3929
3930 /*
3931 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3932 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3933 */
3934 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3935 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3936 if (uNewTSSLimit < uNewTSSLimitMin)
3937 {
3938 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3939 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3940 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3941 }
3942
3943 /*
3944 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3945 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3946 */
3947 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3948 {
3949 uint32_t const uExitInfo1 = SelTSS;
3950 uint32_t uExitInfo2 = uErr;
3951 switch (enmTaskSwitch)
3952 {
3953 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3954 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3955 default: break;
3956 }
3957 if (fFlags & IEM_XCPT_FLAGS_ERR)
3958 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3959 if (pCtx->eflags.Bits.u1RF)
3960 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3961
3962 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3963 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3964 RT_NOREF2(uExitInfo1, uExitInfo2);
3965 }
3966 /** @todo Nested-VMX task-switch intercept. */
3967
3968 /*
3969 * Check the current TSS limit. The last written byte to the current TSS during the
3970 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3971 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3972 *
3973 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3974 * end up with smaller than "legal" TSS limits.
3975 */
3976 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3977 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3978 if (uCurTSSLimit < uCurTSSLimitMin)
3979 {
3980 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3981 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3982 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3983 }
3984
3985 /*
3986 * Verify that the new TSS can be accessed and map it. Map only the required contents
3987 * and not the entire TSS.
3988 */
3989 void *pvNewTSS;
3990 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3991 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3992 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3993 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3994 * not perform correct translation if this happens. See Intel spec. 7.2.1
3995 * "Task-State Segment" */
3996 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3997 if (rcStrict != VINF_SUCCESS)
3998 {
3999 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4000 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4001 return rcStrict;
4002 }
4003
4004 /*
4005 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4006 */
4007 uint32_t u32EFlags = pCtx->eflags.u32;
4008 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4009 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4010 {
4011 PX86DESC pDescCurTSS;
4012 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4013 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4014 if (rcStrict != VINF_SUCCESS)
4015 {
4016 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4017 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4018 return rcStrict;
4019 }
4020
4021 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4022 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4023 if (rcStrict != VINF_SUCCESS)
4024 {
4025 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4026 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4027 return rcStrict;
4028 }
4029
4030 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4031 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4032 {
4033 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4034 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4035 u32EFlags &= ~X86_EFL_NT;
4036 }
4037 }
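/* Busy-bit bookkeeping summary: JMP and IRET clear the busy bit of the outgoing
 * task's TSS descriptor here (IRET additionally clears NT in the EFLAGS image
 * saved below); the busy bit of the incoming TSS descriptor is set further down
 * for everything but IRET, and CALL/INT_XCPT additionally set EFLAGS.NT and
 * store the back link (selPrev) in the new TSS. */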
4038
4039 /*
4040 * Save the CPU state into the current TSS.
4041 */
4042 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4043 if (GCPtrNewTSS == GCPtrCurTSS)
4044 {
4045 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4046 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4047 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4048 }
4049 if (fIsNewTSS386)
4050 {
4051 /*
4052 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4053 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4054 */
4055 void *pvCurTSS32;
4056 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4057 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4058 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4059 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4060 if (rcStrict != VINF_SUCCESS)
4061 {
4062 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4063 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4064 return rcStrict;
4065 }
4066
4067 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4068 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4069 pCurTSS32->eip = uNextEip;
4070 pCurTSS32->eflags = u32EFlags;
4071 pCurTSS32->eax = pCtx->eax;
4072 pCurTSS32->ecx = pCtx->ecx;
4073 pCurTSS32->edx = pCtx->edx;
4074 pCurTSS32->ebx = pCtx->ebx;
4075 pCurTSS32->esp = pCtx->esp;
4076 pCurTSS32->ebp = pCtx->ebp;
4077 pCurTSS32->esi = pCtx->esi;
4078 pCurTSS32->edi = pCtx->edi;
4079 pCurTSS32->es = pCtx->es.Sel;
4080 pCurTSS32->cs = pCtx->cs.Sel;
4081 pCurTSS32->ss = pCtx->ss.Sel;
4082 pCurTSS32->ds = pCtx->ds.Sel;
4083 pCurTSS32->fs = pCtx->fs.Sel;
4084 pCurTSS32->gs = pCtx->gs.Sel;
4085
4086 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4087 if (rcStrict != VINF_SUCCESS)
4088 {
4089 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4090 VBOXSTRICTRC_VAL(rcStrict)));
4091 return rcStrict;
4092 }
4093 }
4094 else
4095 {
4096 /*
4097 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4098 */
4099 void *pvCurTSS16;
4100 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4101 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4102 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4103 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4104 if (rcStrict != VINF_SUCCESS)
4105 {
4106 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4107 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4108 return rcStrict;
4109 }
4110
4111 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4112 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4113 pCurTSS16->ip = uNextEip;
4114 pCurTSS16->flags = u32EFlags;
4115 pCurTSS16->ax = pCtx->ax;
4116 pCurTSS16->cx = pCtx->cx;
4117 pCurTSS16->dx = pCtx->dx;
4118 pCurTSS16->bx = pCtx->bx;
4119 pCurTSS16->sp = pCtx->sp;
4120 pCurTSS16->bp = pCtx->bp;
4121 pCurTSS16->si = pCtx->si;
4122 pCurTSS16->di = pCtx->di;
4123 pCurTSS16->es = pCtx->es.Sel;
4124 pCurTSS16->cs = pCtx->cs.Sel;
4125 pCurTSS16->ss = pCtx->ss.Sel;
4126 pCurTSS16->ds = pCtx->ds.Sel;
4127
4128 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4129 if (rcStrict != VINF_SUCCESS)
4130 {
4131 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4132 VBOXSTRICTRC_VAL(rcStrict)));
4133 return rcStrict;
4134 }
4135 }
4136
4137 /*
4138 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4139 */
4140 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4141 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4142 {
4143 /* 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
4144 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4145 pNewTSS->selPrev = pCtx->tr.Sel;
4146 }
4147
4148 /*
4149 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4150 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4151 */
4152 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4153 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4154 bool fNewDebugTrap;
4155 if (fIsNewTSS386)
4156 {
4157 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4158 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4159 uNewEip = pNewTSS32->eip;
4160 uNewEflags = pNewTSS32->eflags;
4161 uNewEax = pNewTSS32->eax;
4162 uNewEcx = pNewTSS32->ecx;
4163 uNewEdx = pNewTSS32->edx;
4164 uNewEbx = pNewTSS32->ebx;
4165 uNewEsp = pNewTSS32->esp;
4166 uNewEbp = pNewTSS32->ebp;
4167 uNewEsi = pNewTSS32->esi;
4168 uNewEdi = pNewTSS32->edi;
4169 uNewES = pNewTSS32->es;
4170 uNewCS = pNewTSS32->cs;
4171 uNewSS = pNewTSS32->ss;
4172 uNewDS = pNewTSS32->ds;
4173 uNewFS = pNewTSS32->fs;
4174 uNewGS = pNewTSS32->gs;
4175 uNewLdt = pNewTSS32->selLdt;
4176 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4177 }
4178 else
4179 {
4180 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4181 uNewCr3 = 0;
4182 uNewEip = pNewTSS16->ip;
4183 uNewEflags = pNewTSS16->flags;
4184 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4185 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4186 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4187 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4188 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4189 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4190 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4191 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4192 uNewES = pNewTSS16->es;
4193 uNewCS = pNewTSS16->cs;
4194 uNewSS = pNewTSS16->ss;
4195 uNewDS = pNewTSS16->ds;
4196 uNewFS = 0;
4197 uNewGS = 0;
4198 uNewLdt = pNewTSS16->selLdt;
4199 fNewDebugTrap = false;
4200 }
4201
4202 if (GCPtrNewTSS == GCPtrCurTSS)
4203 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4204 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4205
4206 /*
4207 * We're done accessing the new TSS.
4208 */
4209 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4210 if (rcStrict != VINF_SUCCESS)
4211 {
4212 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4213 return rcStrict;
4214 }
4215
4216 /*
4217 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4218 */
4219 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4220 {
4221 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4222 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4223 if (rcStrict != VINF_SUCCESS)
4224 {
4225 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4226 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4227 return rcStrict;
4228 }
4229
4230 /* Check that the descriptor indicates the new TSS is available (not busy). */
4231 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4232 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4233 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4234
4235 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4236 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4237 if (rcStrict != VINF_SUCCESS)
4238 {
4239 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4240 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4241 return rcStrict;
4242 }
4243 }
4244
4245 /*
4246 * From this point on, we're technically in the new task. Any exceptions raised below are
4247 * deferred until the task switch completes, but are delivered before executing any instruction in the new task.
4248 */
4249 pCtx->tr.Sel = SelTSS;
4250 pCtx->tr.ValidSel = SelTSS;
4251 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4252 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4253 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4254 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4255 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4256
4257 /* Set the busy bit in TR. */
4258 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4259 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4260 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4261 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4262 {
4263 uNewEflags |= X86_EFL_NT;
4264 }
4265
4266 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4267 pCtx->cr0 |= X86_CR0_TS;
4268 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4269
4270 pCtx->eip = uNewEip;
4271 pCtx->eax = uNewEax;
4272 pCtx->ecx = uNewEcx;
4273 pCtx->edx = uNewEdx;
4274 pCtx->ebx = uNewEbx;
4275 pCtx->esp = uNewEsp;
4276 pCtx->ebp = uNewEbp;
4277 pCtx->esi = uNewEsi;
4278 pCtx->edi = uNewEdi;
4279
4280 uNewEflags &= X86_EFL_LIVE_MASK;
4281 uNewEflags |= X86_EFL_RA1_MASK;
4282 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4283
4284 /*
4285 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4286 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4287 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4288 */
4289 pCtx->es.Sel = uNewES;
4290 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4291
4292 pCtx->cs.Sel = uNewCS;
4293 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4294
4295 pCtx->ss.Sel = uNewSS;
4296 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4297
4298 pCtx->ds.Sel = uNewDS;
4299 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4300
4301 pCtx->fs.Sel = uNewFS;
4302 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4303
4304 pCtx->gs.Sel = uNewGS;
4305 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4306 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4307
4308 pCtx->ldtr.Sel = uNewLdt;
4309 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4310 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4311 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4312
4313 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4314 {
4315 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4316 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4317 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4318 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4319 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4320 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4321 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4322 }
4323
4324 /*
4325 * Switch CR3 for the new task.
4326 */
4327 if ( fIsNewTSS386
4328 && (pCtx->cr0 & X86_CR0_PG))
4329 {
4330 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4331 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4332 {
4333 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4334 AssertRCSuccessReturn(rc, rc);
4335 }
4336 else
4337 pCtx->cr3 = uNewCr3;
4338
4339 /* Inform PGM. */
4340 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4341 {
4342 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4343 AssertRCReturn(rc, rc);
4344 /* ignore informational status codes */
4345 }
4346 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4347 }
4348
4349 /*
4350 * Switch LDTR for the new task.
4351 */
4352 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4353 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4354 else
4355 {
4356 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4357
4358 IEMSELDESC DescNewLdt;
4359 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4360 if (rcStrict != VINF_SUCCESS)
4361 {
4362 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4363 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4364 return rcStrict;
4365 }
4366 if ( !DescNewLdt.Legacy.Gen.u1Present
4367 || DescNewLdt.Legacy.Gen.u1DescType
4368 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4369 {
4370 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4371 uNewLdt, DescNewLdt.Legacy.u));
4372 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4373 }
4374
4375 pCtx->ldtr.ValidSel = uNewLdt;
4376 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4377 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4378 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4379 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4380 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4381 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4382 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4383 }
4384
4385 IEMSELDESC DescSS;
4386 if (IEM_IS_V86_MODE(pVCpu))
4387 {
4388 pVCpu->iem.s.uCpl = 3;
4389 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4390 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4391 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4392 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4393 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4394 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4395
4396 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4397 DescSS.Legacy.u = 0;
4398 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4399 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4400 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4401 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4402 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4403 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4404 DescSS.Legacy.Gen.u2Dpl = 3;
4405 }
4406 else
4407 {
4408 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4409
4410 /*
4411 * Load the stack segment for the new task.
4412 */
4413 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4414 {
4415 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4416 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4417 }
4418
4419 /* Fetch the descriptor. */
4420 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4421 if (rcStrict != VINF_SUCCESS)
4422 {
4423 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4424 VBOXSTRICTRC_VAL(rcStrict)));
4425 return rcStrict;
4426 }
4427
4428 /* SS must be a data segment and writable. */
4429 if ( !DescSS.Legacy.Gen.u1DescType
4430 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4431 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4432 {
4433 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4434 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4435 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4436 }
4437
4438 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4439 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4440 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4441 {
4442 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4443 uNewCpl));
4444 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4445 }
4446
4447 /* Is it there? */
4448 if (!DescSS.Legacy.Gen.u1Present)
4449 {
4450 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4451 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4452 }
4453
4454 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4455 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4456
4457 /* Set the accessed bit before committing the result into SS. */
4458 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4459 {
4460 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4461 if (rcStrict != VINF_SUCCESS)
4462 return rcStrict;
4463 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4464 }
4465
4466 /* Commit SS. */
4467 pCtx->ss.Sel = uNewSS;
4468 pCtx->ss.ValidSel = uNewSS;
4469 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4470 pCtx->ss.u32Limit = cbLimit;
4471 pCtx->ss.u64Base = u64Base;
4472 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4473 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4474
4475 /* CPL has changed, update IEM before loading rest of segments. */
4476 pVCpu->iem.s.uCpl = uNewCpl;
4477
4478 /*
4479 * Load the data segments for the new task.
4480 */
4481 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4482 if (rcStrict != VINF_SUCCESS)
4483 return rcStrict;
4484 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4485 if (rcStrict != VINF_SUCCESS)
4486 return rcStrict;
4487 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4488 if (rcStrict != VINF_SUCCESS)
4489 return rcStrict;
4490 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4491 if (rcStrict != VINF_SUCCESS)
4492 return rcStrict;
4493
4494 /*
4495 * Load the code segment for the new task.
4496 */
4497 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4498 {
4499 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4500 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4501 }
4502
4503 /* Fetch the descriptor. */
4504 IEMSELDESC DescCS;
4505 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4506 if (rcStrict != VINF_SUCCESS)
4507 {
4508 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4509 return rcStrict;
4510 }
4511
4512 /* CS must be a code segment. */
4513 if ( !DescCS.Legacy.Gen.u1DescType
4514 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4515 {
4516 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4517 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4518 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4519 }
4520
4521 /* For conforming CS, DPL must be less than or equal to the RPL. */
4522 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4523 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4524 {
4525 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4526 DescCS.Legacy.Gen.u2Dpl));
4527 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4528 }
4529
4530 /* For non-conforming CS, DPL must match RPL. */
4531 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4532 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4533 {
4534 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4535 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4536 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4537 }
4538
4539 /* Is it there? */
4540 if (!DescCS.Legacy.Gen.u1Present)
4541 {
4542 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4543 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4544 }
4545
4546 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4547 u64Base = X86DESC_BASE(&DescCS.Legacy);
4548
4549 /* Set the accessed bit before committing the result into CS. */
4550 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4551 {
4552 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4553 if (rcStrict != VINF_SUCCESS)
4554 return rcStrict;
4555 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4556 }
4557
4558 /* Commit CS. */
4559 pCtx->cs.Sel = uNewCS;
4560 pCtx->cs.ValidSel = uNewCS;
4561 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4562 pCtx->cs.u32Limit = cbLimit;
4563 pCtx->cs.u64Base = u64Base;
4564 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4565 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4566 }
4567
4568 /** @todo Debug trap. */
4569 if (fIsNewTSS386 && fNewDebugTrap)
4570 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4571
4572 /*
4573 * Construct the error code masks based on what caused this task switch.
4574 * See Intel Instruction reference for INT.
4575 */
4576 uint16_t uExt;
4577 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4578 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4579 {
4580 uExt = 1;
4581 }
4582 else
4583 uExt = 0;
4584
4585 /*
4586 * Push any error code on to the new stack.
4587 */
4588 if (fFlags & IEM_XCPT_FLAGS_ERR)
4589 {
4590 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4591 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4592 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4593
4594 /* Check that there is sufficient space on the stack. */
4595 /** @todo Factor out segment limit checking for normal/expand down segments
4596 * into a separate function. */
4597 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4598 {
4599 if ( pCtx->esp - 1 > cbLimitSS
4600 || pCtx->esp < cbStackFrame)
4601 {
4602 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4603 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4604 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4605 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4606 }
4607 }
4608 else
4609 {
4610 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4611 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4612 {
4613 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4614 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4615 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4616 }
4617 }
4618
4619
4620 if (fIsNewTSS386)
4621 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4622 else
4623 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4624 if (rcStrict != VINF_SUCCESS)
4625 {
4626 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4627 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4628 return rcStrict;
4629 }
4630 }
4631
4632 /* Check the new EIP against the new CS limit. */
4633 if (pCtx->eip > pCtx->cs.u32Limit)
4634 {
4635 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4636 pCtx->eip, pCtx->cs.u32Limit));
4637 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4638 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4639 }
4640
4641 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4642 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4643}
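/* Rough order of operations implemented by iemTaskSwitch() above:
 * 1. update CR2 (page faults), check the new TSS limit, honour the SVM intercept,
 * 2. check the current TSS limit and map the dynamic part of the new TSS,
 * 3. for JMP/IRET clear the outgoing descriptor's busy bit (IRET also clears NT),
 * 4. save the dynamic fields (EIP/EFLAGS/GPRs/segment selectors) into the old TSS,
 * 5. for CALL/INT_XCPT store the back link (selPrev); set the new busy bit unless IRET,
 * 6. load TR, EFLAGS, GPRs, CR3 and LDTR from the new TSS,
 * 7. load SS, CS and the data segments with the protected-mode checks (or the V86 shortcut),
 * 8. push the error code, if any, and check the new EIP against CS.limit. */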
4644
4645
4646/**
4647 * Implements exceptions and interrupts for protected mode.
4648 *
4649 * @returns VBox strict status code.
4650 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4651 * @param pCtx The CPU context.
4652 * @param cbInstr The number of bytes to offset rIP by in the return
4653 * address.
4654 * @param u8Vector The interrupt / exception vector number.
4655 * @param fFlags The flags.
4656 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4657 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4658 */
4659IEM_STATIC VBOXSTRICTRC
4660iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4661 PCPUMCTX pCtx,
4662 uint8_t cbInstr,
4663 uint8_t u8Vector,
4664 uint32_t fFlags,
4665 uint16_t uErr,
4666 uint64_t uCr2)
4667{
4668 /*
4669 * Read the IDT entry.
4670 */
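 /* Each protected-mode IDT entry is 8 bytes, so the limit must cover offset 8 * vector + 7. */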
4671 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4672 {
4673 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4674 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4675 }
4676 X86DESC Idte;
4677 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4678 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4679 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4680 return rcStrict;
4681 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4682 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4683 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4684
4685 /*
4686 * Check the descriptor type, DPL and such.
4687 * ASSUMES this is done in the same order as described for call-gate calls.
4688 */
4689 if (Idte.Gate.u1DescType)
4690 {
4691 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4692 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4693 }
4694 bool fTaskGate = false;
4695 uint8_t f32BitGate = true;
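 /* TF, NT, RF and VM are always cleared on delivery through an interrupt or trap gate;
    interrupt gates additionally clear IF (added in the switch below). */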
4696 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4697 switch (Idte.Gate.u4Type)
4698 {
4699 case X86_SEL_TYPE_SYS_UNDEFINED:
4700 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4701 case X86_SEL_TYPE_SYS_LDT:
4702 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4703 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4704 case X86_SEL_TYPE_SYS_UNDEFINED2:
4705 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4706 case X86_SEL_TYPE_SYS_UNDEFINED3:
4707 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4708 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4709 case X86_SEL_TYPE_SYS_UNDEFINED4:
4710 {
4711 /** @todo check what actually happens when the type is wrong...
4712 * esp. call gates. */
4713 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4714 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4715 }
4716
4717 case X86_SEL_TYPE_SYS_286_INT_GATE:
4718 f32BitGate = false;
4719 RT_FALL_THRU();
4720 case X86_SEL_TYPE_SYS_386_INT_GATE:
4721 fEflToClear |= X86_EFL_IF;
4722 break;
4723
4724 case X86_SEL_TYPE_SYS_TASK_GATE:
4725 fTaskGate = true;
4726#ifndef IEM_IMPLEMENTS_TASKSWITCH
4727 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4728#endif
4729 break;
4730
4731 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
 4732 f32BitGate = false;
 RT_FALL_THRU();
4733 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4734 break;
4735
4736 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4737 }
4738
4739 /* Check DPL against CPL if applicable. */
4740 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4741 {
4742 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4743 {
4744 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4745 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4746 }
4747 }
4748
4749 /* Is it there? */
4750 if (!Idte.Gate.u1Present)
4751 {
4752 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4753 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4754 }
4755
4756 /* Is it a task-gate? */
4757 if (fTaskGate)
4758 {
4759 /*
4760 * Construct the error code masks based on what caused this task switch.
4761 * See Intel Instruction reference for INT.
4762 */
4763 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4764 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4765 RTSEL SelTSS = Idte.Gate.u16Sel;
4766
4767 /*
4768 * Fetch the TSS descriptor in the GDT.
4769 */
4770 IEMSELDESC DescTSS;
4771 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4772 if (rcStrict != VINF_SUCCESS)
4773 {
4774 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4775 VBOXSTRICTRC_VAL(rcStrict)));
4776 return rcStrict;
4777 }
4778
4779 /* The TSS descriptor must be a system segment and be available (not busy). */
4780 if ( DescTSS.Legacy.Gen.u1DescType
4781 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4782 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4783 {
4784 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4785 u8Vector, SelTSS, DescTSS.Legacy.au64));
4786 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4787 }
4788
4789 /* The TSS must be present. */
4790 if (!DescTSS.Legacy.Gen.u1Present)
4791 {
4792 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4793 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4794 }
4795
4796 /* Do the actual task switch. */
4797 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4798 }
4799
4800 /* A null CS is bad. */
4801 RTSEL NewCS = Idte.Gate.u16Sel;
4802 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4803 {
4804 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4805 return iemRaiseGeneralProtectionFault0(pVCpu);
4806 }
4807
4808 /* Fetch the descriptor for the new CS. */
4809 IEMSELDESC DescCS;
4810 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4811 if (rcStrict != VINF_SUCCESS)
4812 {
4813 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4814 return rcStrict;
4815 }
4816
4817 /* Must be a code segment. */
4818 if (!DescCS.Legacy.Gen.u1DescType)
4819 {
4820 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4821 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4822 }
4823 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4824 {
4825 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4826 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4827 }
4828
4829 /* Don't allow lowering the privilege level. */
4830 /** @todo Does the lowering of privileges apply to software interrupts
4831 * only? This has bearings on the more-privileged or
4832 * same-privilege stack behavior further down. A testcase would
4833 * be nice. */
4834 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4835 {
4836 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4837 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4838 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4839 }
4840
4841 /* Make sure the selector is present. */
4842 if (!DescCS.Legacy.Gen.u1Present)
4843 {
4844 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4845 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4846 }
4847
4848 /* Check the new EIP against the new CS limit. */
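 /* 286 gates only provide a 16-bit offset; 386 gates combine the low and high offset words into a 32-bit EIP. */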
4849 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4850 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4851 ? Idte.Gate.u16OffsetLow
4852 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4853 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4854 if (uNewEip > cbLimitCS)
4855 {
4856 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4857 u8Vector, uNewEip, cbLimitCS, NewCS));
4858 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4859 }
4860 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4861
4862 /* Calc the flag image to push. */
4863 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4864 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4865 fEfl &= ~X86_EFL_RF;
4866 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4867 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4868
4869 /* From V8086 mode only go to CPL 0. */
4870 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4871 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4872 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4873 {
4874 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4875 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4876 }
4877
4878 /*
4879 * If the privilege level changes, we need to get a new stack from the TSS.
 4880 * This in turn means validating the new SS and ESP...
4881 */
4882 if (uNewCpl != pVCpu->iem.s.uCpl)
4883 {
4884 RTSEL NewSS;
4885 uint32_t uNewEsp;
4886 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4887 if (rcStrict != VINF_SUCCESS)
4888 return rcStrict;
4889
4890 IEMSELDESC DescSS;
4891 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4892 if (rcStrict != VINF_SUCCESS)
4893 return rcStrict;
4894 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4895 if (!DescSS.Legacy.Gen.u1DefBig)
4896 {
4897 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4898 uNewEsp = (uint16_t)uNewEsp;
4899 }
4900
4901 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4902
4903 /* Check that there is sufficient space for the stack frame. */
4904 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
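 /* Frame: EIP, CS, EFLAGS, old ESP and old SS (5 entries), plus ES, DS, FS and GS when
    interrupting V8086 code, plus an optional error code; each entry is 2 bytes wide for
    a 286 gate and 4 bytes wide for a 386 gate. */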
4905 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4906 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4907 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4908
4909 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4910 {
4911 if ( uNewEsp - 1 > cbLimitSS
4912 || uNewEsp < cbStackFrame)
4913 {
4914 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4915 u8Vector, NewSS, uNewEsp, cbStackFrame));
4916 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4917 }
4918 }
4919 else
4920 {
4921 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4922 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4923 {
4924 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4925 u8Vector, NewSS, uNewEsp, cbStackFrame));
4926 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4927 }
4928 }
4929
4930 /*
4931 * Start making changes.
4932 */
4933
4934 /* Set the new CPL so that stack accesses use it. */
4935 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4936 pVCpu->iem.s.uCpl = uNewCpl;
4937
4938 /* Create the stack frame. */
4939 RTPTRUNION uStackFrame;
4940 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4941 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4942 if (rcStrict != VINF_SUCCESS)
4943 return rcStrict;
4944 void * const pvStackFrame = uStackFrame.pv;
4945 if (f32BitGate)
4946 {
4947 if (fFlags & IEM_XCPT_FLAGS_ERR)
4948 *uStackFrame.pu32++ = uErr;
4949 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4950 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4951 uStackFrame.pu32[2] = fEfl;
4952 uStackFrame.pu32[3] = pCtx->esp;
4953 uStackFrame.pu32[4] = pCtx->ss.Sel;
4954 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4955 if (fEfl & X86_EFL_VM)
4956 {
4957 uStackFrame.pu32[1] = pCtx->cs.Sel;
4958 uStackFrame.pu32[5] = pCtx->es.Sel;
4959 uStackFrame.pu32[6] = pCtx->ds.Sel;
4960 uStackFrame.pu32[7] = pCtx->fs.Sel;
4961 uStackFrame.pu32[8] = pCtx->gs.Sel;
4962 }
4963 }
4964 else
4965 {
4966 if (fFlags & IEM_XCPT_FLAGS_ERR)
4967 *uStackFrame.pu16++ = uErr;
4968 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4969 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4970 uStackFrame.pu16[2] = fEfl;
4971 uStackFrame.pu16[3] = pCtx->sp;
4972 uStackFrame.pu16[4] = pCtx->ss.Sel;
4973 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4974 if (fEfl & X86_EFL_VM)
4975 {
4976 uStackFrame.pu16[1] = pCtx->cs.Sel;
4977 uStackFrame.pu16[5] = pCtx->es.Sel;
4978 uStackFrame.pu16[6] = pCtx->ds.Sel;
4979 uStackFrame.pu16[7] = pCtx->fs.Sel;
4980 uStackFrame.pu16[8] = pCtx->gs.Sel;
4981 }
4982 }
4983 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4984 if (rcStrict != VINF_SUCCESS)
4985 return rcStrict;
4986
4987 /* Mark the selectors 'accessed' (hope this is the correct time). */
 4988 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4989 * after pushing the stack frame? (Write protect the gdt + stack to
4990 * find out.) */
4991 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4992 {
4993 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4994 if (rcStrict != VINF_SUCCESS)
4995 return rcStrict;
4996 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4997 }
4998
4999 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5000 {
5001 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5002 if (rcStrict != VINF_SUCCESS)
5003 return rcStrict;
5004 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5005 }
5006
5007 /*
 5008 * Start committing the register changes (joins with the DPL=CPL branch).
5009 */
5010 pCtx->ss.Sel = NewSS;
5011 pCtx->ss.ValidSel = NewSS;
5012 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5013 pCtx->ss.u32Limit = cbLimitSS;
5014 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5015 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5016 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5017 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5018 * SP is loaded).
5019 * Need to check the other combinations too:
5020 * - 16-bit TSS, 32-bit handler
5021 * - 32-bit TSS, 16-bit handler */
5022 if (!pCtx->ss.Attr.n.u1DefBig)
5023 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
5024 else
5025 pCtx->rsp = uNewEsp - cbStackFrame;
5026
5027 if (fEfl & X86_EFL_VM)
5028 {
5029 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
5030 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5031 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5032 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5033 }
5034 }
5035 /*
5036 * Same privilege, no stack change and smaller stack frame.
5037 */
5038 else
5039 {
5040 uint64_t uNewRsp;
5041 RTPTRUNION uStackFrame;
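 /* Frame: EIP, CS and EFLAGS (3 entries), plus an optional error code; each entry is
    2 bytes wide for a 286 gate and 4 bytes wide for a 386 gate. */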
5042 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5043 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5044 if (rcStrict != VINF_SUCCESS)
5045 return rcStrict;
5046 void * const pvStackFrame = uStackFrame.pv;
5047
5048 if (f32BitGate)
5049 {
5050 if (fFlags & IEM_XCPT_FLAGS_ERR)
5051 *uStackFrame.pu32++ = uErr;
5052 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5053 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5054 uStackFrame.pu32[2] = fEfl;
5055 }
5056 else
5057 {
5058 if (fFlags & IEM_XCPT_FLAGS_ERR)
5059 *uStackFrame.pu16++ = uErr;
5060 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5061 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5062 uStackFrame.pu16[2] = fEfl;
5063 }
5064 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5065 if (rcStrict != VINF_SUCCESS)
5066 return rcStrict;
5067
5068 /* Mark the CS selector as 'accessed'. */
5069 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5070 {
5071 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5072 if (rcStrict != VINF_SUCCESS)
5073 return rcStrict;
5074 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5075 }
5076
5077 /*
5078 * Start committing the register changes (joins with the other branch).
5079 */
5080 pCtx->rsp = uNewRsp;
5081 }
5082
5083 /* ... register committing continues. */
5084 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5085 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5086 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5087 pCtx->cs.u32Limit = cbLimitCS;
5088 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5089 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5090
5091 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5092 fEfl &= ~fEflToClear;
5093 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5094
5095 if (fFlags & IEM_XCPT_FLAGS_CR2)
5096 pCtx->cr2 = uCr2;
5097
5098 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5099 iemRaiseXcptAdjustState(pCtx, u8Vector);
5100
5101 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5102}
5103
5104
5105/**
5106 * Implements exceptions and interrupts for long mode.
5107 *
5108 * @returns VBox strict status code.
5109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5110 * @param pCtx The CPU context.
5111 * @param cbInstr The number of bytes to offset rIP by in the return
5112 * address.
5113 * @param u8Vector The interrupt / exception vector number.
5114 * @param fFlags The flags.
5115 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5116 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5117 */
5118IEM_STATIC VBOXSTRICTRC
5119iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5120 PCPUMCTX pCtx,
5121 uint8_t cbInstr,
5122 uint8_t u8Vector,
5123 uint32_t fFlags,
5124 uint16_t uErr,
5125 uint64_t uCr2)
5126{
5127 /*
5128 * Read the IDT entry.
5129 */
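 /* Long-mode IDT entries are 16 bytes (two 8-byte halves), hence the vector is scaled by 16
    and the entry is fetched in two reads below. */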
5130 uint16_t offIdt = (uint16_t)u8Vector << 4;
5131 if (pCtx->idtr.cbIdt < offIdt + 7)
5132 {
5133 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5134 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5135 }
5136 X86DESC64 Idte;
5137 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5138 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5139 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5140 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5141 return rcStrict;
5142 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5143 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5144 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5145
5146 /*
5147 * Check the descriptor type, DPL and such.
5148 * ASSUMES this is done in the same order as described for call-gate calls.
5149 */
5150 if (Idte.Gate.u1DescType)
5151 {
5152 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5153 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5154 }
5155 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5156 switch (Idte.Gate.u4Type)
5157 {
5158 case AMD64_SEL_TYPE_SYS_INT_GATE:
5159 fEflToClear |= X86_EFL_IF;
5160 break;
5161 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5162 break;
5163
5164 default:
5165 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5166 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5167 }
5168
5169 /* Check DPL against CPL if applicable. */
5170 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5171 {
5172 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5173 {
5174 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5175 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5176 }
5177 }
5178
5179 /* Is it there? */
5180 if (!Idte.Gate.u1Present)
5181 {
5182 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5183 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5184 }
5185
5186 /* A null CS is bad. */
5187 RTSEL NewCS = Idte.Gate.u16Sel;
5188 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5189 {
5190 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5191 return iemRaiseGeneralProtectionFault0(pVCpu);
5192 }
5193
5194 /* Fetch the descriptor for the new CS. */
5195 IEMSELDESC DescCS;
5196 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5197 if (rcStrict != VINF_SUCCESS)
5198 {
5199 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5200 return rcStrict;
5201 }
5202
5203 /* Must be a 64-bit code segment. */
5204 if (!DescCS.Long.Gen.u1DescType)
5205 {
5206 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5207 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5208 }
5209 if ( !DescCS.Long.Gen.u1Long
5210 || DescCS.Long.Gen.u1DefBig
5211 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5212 {
5213 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5214 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5215 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5216 }
5217
5218 /* Don't allow lowering the privilege level. For non-conforming CS
5219 selectors, the CS.DPL sets the privilege level the trap/interrupt
5220 handler runs at. For conforming CS selectors, the CPL remains
5221 unchanged, but the CS.DPL must be <= CPL. */
5222 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5223 * when CPU in Ring-0. Result \#GP? */
5224 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5225 {
5226 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5227 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5228 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5229 }
5230
5231
5232 /* Make sure the selector is present. */
5233 if (!DescCS.Legacy.Gen.u1Present)
5234 {
5235 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5236 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5237 }
5238
5239 /* Check that the new RIP is canonical. */
5240 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5241 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5242 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5243 if (!IEM_IS_CANONICAL(uNewRip))
5244 {
5245 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5246 return iemRaiseGeneralProtectionFault0(pVCpu);
5247 }
5248
5249 /*
5250 * If the privilege level changes or if the IST isn't zero, we need to get
5251 * a new stack from the TSS.
5252 */
5253 uint64_t uNewRsp;
5254 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5255 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5256 if ( uNewCpl != pVCpu->iem.s.uCpl
5257 || Idte.Gate.u3IST != 0)
5258 {
5259 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5260 if (rcStrict != VINF_SUCCESS)
5261 return rcStrict;
5262 }
5263 else
5264 uNewRsp = pCtx->rsp;
5265 uNewRsp &= ~(uint64_t)0xf;
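 /* The CPU aligns the new RSP down to a 16-byte boundary before pushing the frame in 64-bit mode,
    hence the masking above. */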
5266
5267 /*
5268 * Calc the flag image to push.
5269 */
5270 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5271 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5272 fEfl &= ~X86_EFL_RF;
5273 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5274 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5275
5276 /*
5277 * Start making changes.
5278 */
5279 /* Set the new CPL so that stack accesses use it. */
5280 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5281 pVCpu->iem.s.uCpl = uNewCpl;
5282
5283 /* Create the stack frame. */
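 /* Five qwords: RIP, CS, RFLAGS, old RSP and old SS, plus the error code when applicable. */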
5284 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5285 RTPTRUNION uStackFrame;
5286 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5287 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5288 if (rcStrict != VINF_SUCCESS)
5289 return rcStrict;
5290 void * const pvStackFrame = uStackFrame.pv;
5291
5292 if (fFlags & IEM_XCPT_FLAGS_ERR)
5293 *uStackFrame.pu64++ = uErr;
5294 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5295 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5296 uStackFrame.pu64[2] = fEfl;
5297 uStackFrame.pu64[3] = pCtx->rsp;
5298 uStackFrame.pu64[4] = pCtx->ss.Sel;
5299 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5300 if (rcStrict != VINF_SUCCESS)
5301 return rcStrict;
5302
 5303 /* Mark the CS selector 'accessed' (hope this is the correct time). */
 5304 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5305 * after pushing the stack frame? (Write protect the gdt + stack to
5306 * find out.) */
5307 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5308 {
5309 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5310 if (rcStrict != VINF_SUCCESS)
5311 return rcStrict;
5312 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5313 }
5314
5315 /*
 5316 * Start committing the register changes.
5317 */
5318 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5319 * hidden registers when interrupting 32-bit or 16-bit code! */
5320 if (uNewCpl != uOldCpl)
5321 {
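 /* In 64-bit mode a privilege change loads SS with a NULL selector whose RPL equals the new CPL;
    the hidden parts are flagged unusable below. */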
5322 pCtx->ss.Sel = 0 | uNewCpl;
5323 pCtx->ss.ValidSel = 0 | uNewCpl;
5324 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5325 pCtx->ss.u32Limit = UINT32_MAX;
5326 pCtx->ss.u64Base = 0;
5327 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5328 }
5329 pCtx->rsp = uNewRsp - cbStackFrame;
5330 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5331 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5332 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5333 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5334 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5335 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5336 pCtx->rip = uNewRip;
5337
5338 fEfl &= ~fEflToClear;
5339 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5340
5341 if (fFlags & IEM_XCPT_FLAGS_CR2)
5342 pCtx->cr2 = uCr2;
5343
5344 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5345 iemRaiseXcptAdjustState(pCtx, u8Vector);
5346
5347 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5348}
5349
5350
5351/**
5352 * Implements exceptions and interrupts.
5353 *
 5354 * All exceptions and interrupts go through this function!
5355 *
5356 * @returns VBox strict status code.
5357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5358 * @param cbInstr The number of bytes to offset rIP by in the return
5359 * address.
5360 * @param u8Vector The interrupt / exception vector number.
5361 * @param fFlags The flags.
5362 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5363 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5364 */
5365DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5366iemRaiseXcptOrInt(PVMCPU pVCpu,
5367 uint8_t cbInstr,
5368 uint8_t u8Vector,
5369 uint32_t fFlags,
5370 uint16_t uErr,
5371 uint64_t uCr2)
5372{
5373 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5374#ifdef IN_RING0
5375 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5376 AssertRCReturn(rc, rc);
5377#endif
5378
5379#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5380 /*
5381 * Flush prefetch buffer
5382 */
5383 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5384#endif
5385
5386 /*
5387 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5388 */
5389 if ( pCtx->eflags.Bits.u1VM
5390 && pCtx->eflags.Bits.u2IOPL != 3
5391 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5392 && (pCtx->cr0 & X86_CR0_PE) )
5393 {
5394 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5395 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5396 u8Vector = X86_XCPT_GP;
5397 uErr = 0;
5398 }
5399#ifdef DBGFTRACE_ENABLED
5400 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5401 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5402 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5403#endif
5404
5405#ifdef VBOX_WITH_NESTED_HWVIRT
5406 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
5407 {
5408 /*
5409 * If the event is being injected as part of VMRUN, it isn't subject to event
5410 * intercepts in the nested-guest. However, secondary exceptions that occur
5411 * during injection of any event -are- subject to exception intercepts.
5412 * See AMD spec. 15.20 "Event Injection".
5413 */
5414 if (!pCtx->hwvirt.svm.fInterceptEvents)
5415 pCtx->hwvirt.svm.fInterceptEvents = 1;
5416 else
5417 {
5418 /*
5419 * Check and handle if the event being raised is intercepted.
5420 */
5421 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5422 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5423 return rcStrict0;
5424 }
5425 }
5426#endif /* VBOX_WITH_NESTED_HWVIRT */
5427
5428 /*
5429 * Do recursion accounting.
5430 */
5431 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5432 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5433 if (pVCpu->iem.s.cXcptRecursions == 0)
5434 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5435 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5436 else
5437 {
5438 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5439 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5440 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5441
5442 if (pVCpu->iem.s.cXcptRecursions >= 3)
5443 {
5444#ifdef DEBUG_bird
5445 AssertFailed();
5446#endif
5447 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5448 }
5449
5450 /*
5451 * Evaluate the sequence of recurring events.
5452 */
5453 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5454 NULL /* pXcptRaiseInfo */);
5455 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5456 { /* likely */ }
5457 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5458 {
5459 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5460 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5461 u8Vector = X86_XCPT_DF;
5462 uErr = 0;
5463 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5464 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5465 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + X86_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5466 }
5467 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5468 {
5469 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5470 return iemInitiateCpuShutdown(pVCpu);
5471 }
5472 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5473 {
5474 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5475 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5476 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5477 return VERR_EM_GUEST_CPU_HANG;
5478 }
5479 else
5480 {
5481 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5482 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5483 return VERR_IEM_IPE_9;
5484 }
5485
5486 /*
 5487 * The 'EXT' bit is set when an exception occurs during delivery of an external
 5488 * event (such as an interrupt or an earlier exception)[1]. A privileged software
 5489 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
 5490 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5491 *
5492 * [1] - Intel spec. 6.13 "Error Code"
5493 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5494 * [3] - Intel Instruction reference for INT n.
5495 */
5496 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5497 && (fFlags & IEM_XCPT_FLAGS_ERR)
5498 && u8Vector != X86_XCPT_PF
5499 && u8Vector != X86_XCPT_DF)
5500 {
5501 uErr |= X86_TRAP_ERR_EXTERNAL;
5502 }
5503 }
5504
5505 pVCpu->iem.s.cXcptRecursions++;
5506 pVCpu->iem.s.uCurXcpt = u8Vector;
5507 pVCpu->iem.s.fCurXcpt = fFlags;
5508 pVCpu->iem.s.uCurXcptErr = uErr;
5509 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5510
5511 /*
5512 * Extensive logging.
5513 */
5514#if defined(LOG_ENABLED) && defined(IN_RING3)
5515 if (LogIs3Enabled())
5516 {
5517 PVM pVM = pVCpu->CTX_SUFF(pVM);
5518 char szRegs[4096];
5519 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5520 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5521 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5522 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5523 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5524 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5525 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5526 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5527 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5528 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5529 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5530 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5531 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5532 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5533 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5534 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5535 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5536 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5537 " efer=%016VR{efer}\n"
5538 " pat=%016VR{pat}\n"
5539 " sf_mask=%016VR{sf_mask}\n"
5540 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5541 " lstar=%016VR{lstar}\n"
5542 " star=%016VR{star} cstar=%016VR{cstar}\n"
5543 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5544 );
5545
5546 char szInstr[256];
5547 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5548 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5549 szInstr, sizeof(szInstr), NULL);
5550 Log3(("%s%s\n", szRegs, szInstr));
5551 }
5552#endif /* LOG_ENABLED */
5553
5554 /*
5555 * Call the mode specific worker function.
5556 */
5557 VBOXSTRICTRC rcStrict;
5558 if (!(pCtx->cr0 & X86_CR0_PE))
5559 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5560 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5561 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5562 else
5563 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5564
5565 /* Flush the prefetch buffer. */
5566#ifdef IEM_WITH_CODE_TLB
5567 pVCpu->iem.s.pbInstrBuf = NULL;
5568#else
5569 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5570#endif
5571
5572 /*
5573 * Unwind.
5574 */
5575 pVCpu->iem.s.cXcptRecursions--;
5576 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5577 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5578 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5579 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5580 return rcStrict;
5581}
5582
5583#ifdef IEM_WITH_SETJMP
5584/**
5585 * See iemRaiseXcptOrInt. Will not return.
5586 */
5587IEM_STATIC DECL_NO_RETURN(void)
5588iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5589 uint8_t cbInstr,
5590 uint8_t u8Vector,
5591 uint32_t fFlags,
5592 uint16_t uErr,
5593 uint64_t uCr2)
5594{
5595 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5596 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5597}
5598#endif
5599
5600
5601/** \#DE - 00. */
5602DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5603{
5604 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5605}
5606
5607
5608/** \#DB - 01.
 5609 * @note This automatically clears DR7.GD. */
5610DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5611{
5612 /** @todo set/clear RF. */
5613 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5614 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5615}
5616
5617
5618/** \#BR - 05. */
5619DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5620{
5621 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5622}
5623
5624
5625/** \#UD - 06. */
5626DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5627{
5628 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5629}
5630
5631
5632/** \#NM - 07. */
5633DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5634{
5635 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5636}
5637
5638
5639/** \#TS(err) - 0a. */
5640DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5641{
5642 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5643}
5644
5645
5646/** \#TS(tr) - 0a. */
5647DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5648{
5649 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5650 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5651}
5652
5653
5654/** \#TS(0) - 0a. */
5655DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5656{
5657 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5658 0, 0);
5659}
5660
5661
5662/** \#TS(err) - 0a. */
5663DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5664{
5665 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5666 uSel & X86_SEL_MASK_OFF_RPL, 0);
5667}
5668
5669
5670/** \#NP(err) - 0b. */
5671DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5672{
5673 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5674}
5675
5676
5677/** \#NP(sel) - 0b. */
5678DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5679{
5680 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5681 uSel & ~X86_SEL_RPL, 0);
5682}
5683
5684
5685/** \#SS(seg) - 0c. */
5686DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5687{
5688 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5689 uSel & ~X86_SEL_RPL, 0);
5690}
5691
5692
5693/** \#SS(err) - 0c. */
5694DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5695{
5696 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5697}
5698
5699
5700/** \#GP(n) - 0d. */
5701DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5702{
5703 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5704}
5705
5706
5707/** \#GP(0) - 0d. */
5708DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5709{
5710 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5711}
5712
5713#ifdef IEM_WITH_SETJMP
5714/** \#GP(0) - 0d. */
5715DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5716{
5717 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5718}
5719#endif
5720
5721
5722/** \#GP(sel) - 0d. */
5723DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5724{
5725 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5726 Sel & ~X86_SEL_RPL, 0);
5727}
5728
5729
5730/** \#GP(0) - 0d. */
5731DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5732{
5733 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5734}
5735
5736
5737/** \#GP(sel) - 0d. */
5738DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5739{
5740 NOREF(iSegReg); NOREF(fAccess);
5741 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5742 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5743}
5744
5745#ifdef IEM_WITH_SETJMP
5746/** \#GP(sel) - 0d, longjmp. */
5747DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5748{
5749 NOREF(iSegReg); NOREF(fAccess);
5750 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5751 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5752}
5753#endif
5754
5755/** \#GP(sel) - 0d. */
5756DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5757{
5758 NOREF(Sel);
5759 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5760}
5761
5762#ifdef IEM_WITH_SETJMP
5763/** \#GP(sel) - 0d, longjmp. */
5764DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5765{
5766 NOREF(Sel);
5767 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5768}
5769#endif
5770
5771
5772/** \#GP(sel) - 0d. */
5773DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5774{
5775 NOREF(iSegReg); NOREF(fAccess);
5776 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5777}
5778
5779#ifdef IEM_WITH_SETJMP
5780/** \#GP(sel) - 0d, longjmp. */
5781DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5782 uint32_t fAccess)
5783{
5784 NOREF(iSegReg); NOREF(fAccess);
5785 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5786}
5787#endif
5788
5789
5790/** \#PF(n) - 0e. */
5791DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5792{
5793 uint16_t uErr;
5794 switch (rc)
5795 {
5796 case VERR_PAGE_NOT_PRESENT:
5797 case VERR_PAGE_TABLE_NOT_PRESENT:
5798 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5799 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5800 uErr = 0;
5801 break;
5802
5803 default:
5804 AssertMsgFailed(("%Rrc\n", rc));
5805 RT_FALL_THRU();
5806 case VERR_ACCESS_DENIED:
5807 uErr = X86_TRAP_PF_P;
5808 break;
5809
5810 /** @todo reserved */
5811 }
5812
5813 if (pVCpu->iem.s.uCpl == 3)
5814 uErr |= X86_TRAP_PF_US;
5815
5816 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5817 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5818 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5819 uErr |= X86_TRAP_PF_ID;
5820
5821#if 0 /* This is so much non-sense, really. Why was it done like that? */
5822 /* Note! RW access callers reporting a WRITE protection fault, will clear
5823 the READ flag before calling. So, read-modify-write accesses (RW)
5824 can safely be reported as READ faults. */
5825 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5826 uErr |= X86_TRAP_PF_RW;
5827#else
5828 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5829 {
5830 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5831 uErr |= X86_TRAP_PF_RW;
5832 }
5833#endif
5834
5835 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5836 uErr, GCPtrWhere);
5837}
5838
5839#ifdef IEM_WITH_SETJMP
5840/** \#PF(n) - 0e, longjmp. */
5841IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5842{
5843 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5844}
5845#endif
5846
5847
5848/** \#MF(0) - 10. */
5849DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5850{
5851 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5852}
5853
5854
5855/** \#AC(0) - 11. */
5856DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5857{
5858 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5859}
5860
5861
5862/**
5863 * Macro for calling iemCImplRaiseDivideError().
5864 *
5865 * This enables us to add/remove arguments and force different levels of
5866 * inlining as we wish.
5867 *
5868 * @return Strict VBox status code.
5869 */
5870#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5871IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5872{
5873 NOREF(cbInstr);
5874 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5875}
5876
5877
5878/**
5879 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5880 *
5881 * This enables us to add/remove arguments and force different levels of
5882 * inlining as we wish.
5883 *
5884 * @return Strict VBox status code.
5885 */
5886#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5887IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5888{
5889 NOREF(cbInstr);
5890 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5891}
5892
5893
5894/**
5895 * Macro for calling iemCImplRaiseInvalidOpcode().
5896 *
5897 * This enables us to add/remove arguments and force different levels of
5898 * inlining as we wish.
5899 *
5900 * @return Strict VBox status code.
5901 */
5902#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5903IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5904{
5905 NOREF(cbInstr);
5906 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5907}
5908
5909
5910/** @} */
5911
5912
5913/*
5914 *
 5915 * Helper routines.
 5916 * Helper routines.
 5917 * Helper routines.
5918 *
5919 */
5920
5921/**
5922 * Recalculates the effective operand size.
5923 *
5924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5925 */
5926IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5927{
5928 switch (pVCpu->iem.s.enmCpuMode)
5929 {
5930 case IEMMODE_16BIT:
5931 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5932 break;
5933 case IEMMODE_32BIT:
5934 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5935 break;
5936 case IEMMODE_64BIT:
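 /* In 64-bit mode REX.W forces 64-bit operands and takes precedence over the 0x66
    operand-size prefix (see the combined case below). */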
5937 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5938 {
5939 case 0:
5940 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5941 break;
5942 case IEM_OP_PRF_SIZE_OP:
5943 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5944 break;
5945 case IEM_OP_PRF_SIZE_REX_W:
5946 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5947 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5948 break;
5949 }
5950 break;
5951 default:
5952 AssertFailed();
5953 }
5954}
5955
5956
5957/**
5958 * Sets the default operand size to 64-bit and recalculates the effective
5959 * operand size.
5960 *
5961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5962 */
5963IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5964{
5965 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5966 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5967 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5968 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5969 else
5970 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5971}
5972
5973
5974/*
5975 *
5976 * Common opcode decoders.
5977 * Common opcode decoders.
5978 * Common opcode decoders.
5979 *
5980 */
5981//#include <iprt/mem.h>
5982
5983/**
5984 * Used to add extra details about a stub case.
5985 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5986 */
5987IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5988{
5989#if defined(LOG_ENABLED) && defined(IN_RING3)
5990 PVM pVM = pVCpu->CTX_SUFF(pVM);
5991 char szRegs[4096];
5992 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5993 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5994 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5995 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5996 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5997 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5998 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5999 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6000 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6001 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6002 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6003 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6004 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6005 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6006 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6007 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6008 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6009 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6010 " efer=%016VR{efer}\n"
6011 " pat=%016VR{pat}\n"
6012 " sf_mask=%016VR{sf_mask}\n"
6013 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6014 " lstar=%016VR{lstar}\n"
6015 " star=%016VR{star} cstar=%016VR{cstar}\n"
6016 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6017 );
6018
6019 char szInstr[256];
6020 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6021 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6022 szInstr, sizeof(szInstr), NULL);
6023
6024 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6025#else
6026 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs, IEM_GET_CTX(pVCpu)->rip);
6027#endif
6028}
6029
6030/**
6031 * Complains about a stub.
6032 *
6033 * Providing two versions of this macro, one for daily use and one for use when
6034 * working on IEM.
6035 */
6036#if 0
6037# define IEMOP_BITCH_ABOUT_STUB() \
6038 do { \
6039 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6040 iemOpStubMsg2(pVCpu); \
6041 RTAssertPanic(); \
6042 } while (0)
6043#else
6044# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6045#endif
6046
6047/** Stubs an opcode. */
6048#define FNIEMOP_STUB(a_Name) \
6049 FNIEMOP_DEF(a_Name) \
6050 { \
6051 RT_NOREF_PV(pVCpu); \
6052 IEMOP_BITCH_ABOUT_STUB(); \
6053 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6054 } \
6055 typedef int ignore_semicolon
6056
6057/** Stubs an opcode. */
6058#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6059 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6060 { \
6061 RT_NOREF_PV(pVCpu); \
6062 RT_NOREF_PV(a_Name0); \
6063 IEMOP_BITCH_ABOUT_STUB(); \
6064 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6065 } \
6066 typedef int ignore_semicolon
6067
6068/** Stubs an opcode which currently should raise \#UD. */
6069#define FNIEMOP_UD_STUB(a_Name) \
6070 FNIEMOP_DEF(a_Name) \
6071 { \
6072 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6073 return IEMOP_RAISE_INVALID_OPCODE(); \
6074 } \
6075 typedef int ignore_semicolon
6076
6077/** Stubs an opcode which currently should raise \#UD. */
6078#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6079 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6080 { \
6081 RT_NOREF_PV(pVCpu); \
6082 RT_NOREF_PV(a_Name0); \
6083 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6084 return IEMOP_RAISE_INVALID_OPCODE(); \
6085 } \
6086 typedef int ignore_semicolon
6087
6088
6089
6090/** @name Register Access.
6091 * @{
6092 */
6093
6094/**
6095 * Gets a reference (pointer) to the specified hidden segment register.
6096 *
6097 * @returns Hidden register reference.
6098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6099 * @param iSegReg The segment register.
6100 */
6101IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6102{
6103 Assert(iSegReg < X86_SREG_COUNT);
6104 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6105 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6106
6107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6108 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6109 { /* likely */ }
6110 else
6111 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6112#else
6113 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6114#endif
6115 return pSReg;
6116}
6117
6118
6119/**
6120 * Ensures that the given hidden segment register is up to date.
6121 *
6122 * @returns Hidden register reference.
6123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6124 * @param pSReg The segment register.
6125 */
6126IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6127{
6128#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6129 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6130 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6131#else
6132 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6133 NOREF(pVCpu);
6134#endif
6135 return pSReg;
6136}
6137
6138
6139/**
6140 * Gets a reference (pointer) to the specified segment register (the selector
6141 * value).
6142 *
6143 * @returns Pointer to the selector variable.
6144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6145 * @param iSegReg The segment register.
6146 */
6147DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6148{
6149 Assert(iSegReg < X86_SREG_COUNT);
6150 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6151 return &pCtx->aSRegs[iSegReg].Sel;
6152}
6153
6154
6155/**
6156 * Fetches the selector value of a segment register.
6157 *
6158 * @returns The selector value.
6159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6160 * @param iSegReg The segment register.
6161 */
6162DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6163{
6164 Assert(iSegReg < X86_SREG_COUNT);
6165 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6166}
6167
6168
6169/**
6170 * Fetches the base address value of a segment register.
6171 *
 6172 * @returns The segment base address.
6173 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6174 * @param iSegReg The segment register.
6175 */
6176DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6177{
6178 Assert(iSegReg < X86_SREG_COUNT);
6179 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].u64Base;
6180}
6181
6182
6183/**
6184 * Gets a reference (pointer) to the specified general purpose register.
6185 *
6186 * @returns Register reference.
6187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6188 * @param iReg The general purpose register.
6189 */
6190DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6191{
6192 Assert(iReg < 16);
6193 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6194 return &pCtx->aGRegs[iReg];
6195}
6196
6197
6198/**
6199 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6200 *
6201 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6202 *
6203 * @returns Register reference.
6204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6205 * @param iReg The register.
6206 */
6207DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6208{
6209 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6210 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6211 {
6212 Assert(iReg < 16);
6213 return &pCtx->aGRegs[iReg].u8;
6214 }
6215 /* high 8-bit register. */
6216 Assert(iReg < 8);
6217 return &pCtx->aGRegs[iReg & 3].bHi;
6218}
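/*
 * Illustrative sketch (not part of the build): the same 8-bit register
 * encoding resolves differently depending on the REX prefix state, e.g.
 *
 *     iemGRegRefU8(pVCpu, 4);   // AH  == &aGRegs[0].bHi without a REX prefix
 *     iemGRegRefU8(pVCpu, 4);   // SPL == &aGRegs[4].u8   with any REX prefix
 *
 * which is why encodings 4..7 are masked with 3 and routed to bHi above.
 */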
6219
6220
6221/**
6222 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6223 *
6224 * @returns Register reference.
6225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6226 * @param iReg The register.
6227 */
6228DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6229{
6230 Assert(iReg < 16);
6231 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6232 return &pCtx->aGRegs[iReg].u16;
6233}
6234
6235
6236/**
6237 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6238 *
6239 * @returns Register reference.
6240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6241 * @param iReg The register.
6242 */
6243DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6244{
6245 Assert(iReg < 16);
6246 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6247 return &pCtx->aGRegs[iReg].u32;
6248}
6249
6250
6251/**
6252 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6253 *
6254 * @returns Register reference.
6255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6256 * @param iReg The register.
6257 */
6258DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6259{
6260 Assert(iReg < 16);
6261 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6262 return &pCtx->aGRegs[iReg].u64;
6263}
6264
6265
6266/**
6267 * Gets a reference (pointer) to the specified segment register's base address.
6268 *
6269 * @returns Segment register base address reference.
6270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6271 * @param iSegReg The segment register.
6272 */
6273DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6274{
6275 Assert(iSegReg < X86_SREG_COUNT);
6276 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6277 return &pCtx->aSRegs[iSegReg].u64Base;
6278}
6279
6280
6281/**
6282 * Fetches the value of an 8-bit general purpose register.
6283 *
6284 * @returns The register value.
6285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6286 * @param iReg The register.
6287 */
6288DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6289{
6290 return *iemGRegRefU8(pVCpu, iReg);
6291}
6292
6293
6294/**
6295 * Fetches the value of a 16-bit general purpose register.
6296 *
6297 * @returns The register value.
6298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6299 * @param iReg The register.
6300 */
6301DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6302{
6303 Assert(iReg < 16);
6304 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6305}
6306
6307
6308/**
6309 * Fetches the value of a 32-bit general purpose register.
6310 *
6311 * @returns The register value.
6312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6313 * @param iReg The register.
6314 */
6315DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6316{
6317 Assert(iReg < 16);
6318 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6319}
6320
6321
6322/**
6323 * Fetches the value of a 64-bit general purpose register.
6324 *
6325 * @returns The register value.
6326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6327 * @param iReg The register.
6328 */
6329DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6330{
6331 Assert(iReg < 16);
6332 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6333}
6334
6335
6336/**
6337 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6338 *
6339 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6340 * segment limit.
6341 *
 * @returns Strict VBox status code.
6342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6343 * @param offNextInstr The offset of the next instruction.
6344 */
6345IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6346{
6347 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6348 switch (pVCpu->iem.s.enmEffOpSize)
6349 {
6350 case IEMMODE_16BIT:
6351 {
6352 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6353 if ( uNewIp > pCtx->cs.u32Limit
6354 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6355 return iemRaiseGeneralProtectionFault0(pVCpu);
6356 pCtx->rip = uNewIp;
6357 break;
6358 }
6359
6360 case IEMMODE_32BIT:
6361 {
6362 Assert(pCtx->rip <= UINT32_MAX);
6363 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6364
6365 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6366 if (uNewEip > pCtx->cs.u32Limit)
6367 return iemRaiseGeneralProtectionFault0(pVCpu);
6368 pCtx->rip = uNewEip;
6369 break;
6370 }
6371
6372 case IEMMODE_64BIT:
6373 {
6374 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6375
6376 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6377 if (!IEM_IS_CANONICAL(uNewRip))
6378 return iemRaiseGeneralProtectionFault0(pVCpu);
6379 pCtx->rip = uNewRip;
6380 break;
6381 }
6382
6383 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6384 }
6385
6386 pCtx->eflags.Bits.u1RF = 0;
6387
6388#ifndef IEM_WITH_CODE_TLB
6389 /* Flush the prefetch buffer. */
6390 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6391#endif
6392
6393 return VINF_SUCCESS;
6394}
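/*
 * Illustrative sketch (not part of the build): in 16-bit code a 2-byte short
 * JMP at IP=0x1234 with a rel8 of -0x10 lands at 0x1234 + 2 - 0x10 = 0x1226,
 * provided that stays within CS.u32Limit; otherwise #GP(0) is raised above.
 */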
6395
6396
6397/**
6398 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6399 *
6400 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6401 * segment limit.
6402 *
6403 * @returns Strict VBox status code.
6404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6405 * @param offNextInstr The offset of the next instruction.
6406 */
6407IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6408{
6409 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6410 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6411
6412 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6413 if ( uNewIp > pCtx->cs.u32Limit
6414 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6415 return iemRaiseGeneralProtectionFault0(pVCpu);
6416 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6417 pCtx->rip = uNewIp;
6418 pCtx->eflags.Bits.u1RF = 0;
6419
6420#ifndef IEM_WITH_CODE_TLB
6421 /* Flush the prefetch buffer. */
6422 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6423#endif
6424
6425 return VINF_SUCCESS;
6426}
6427
6428
6429/**
6430 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6431 *
6432 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6433 * segment limit.
6434 *
6435 * @returns Strict VBox status code.
6436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6437 * @param offNextInstr The offset of the next instruction.
6438 */
6439IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6440{
6441 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6442 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6443
6444 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6445 {
6446 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6447
6448 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6449 if (uNewEip > pCtx->cs.u32Limit)
6450 return iemRaiseGeneralProtectionFault0(pVCpu);
6451 pCtx->rip = uNewEip;
6452 }
6453 else
6454 {
6455 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6456
6457 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6458 if (!IEM_IS_CANONICAL(uNewRip))
6459 return iemRaiseGeneralProtectionFault0(pVCpu);
6460 pCtx->rip = uNewRip;
6461 }
6462 pCtx->eflags.Bits.u1RF = 0;
6463
6464#ifndef IEM_WITH_CODE_TLB
6465 /* Flush the prefetch buffer. */
6466 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6467#endif
6468
6469 return VINF_SUCCESS;
6470}
6471
6472
6473/**
6474 * Performs a near jump to the specified address.
6475 *
6476 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6477 * segment limit.
6478 *
6479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6480 * @param uNewRip The new RIP value.
6481 */
6482IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6483{
6484 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6485 switch (pVCpu->iem.s.enmEffOpSize)
6486 {
6487 case IEMMODE_16BIT:
6488 {
6489 Assert(uNewRip <= UINT16_MAX);
6490 if ( uNewRip > pCtx->cs.u32Limit
6491 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6492 return iemRaiseGeneralProtectionFault0(pVCpu);
6493 /** @todo Test 16-bit jump in 64-bit mode. */
6494 pCtx->rip = uNewRip;
6495 break;
6496 }
6497
6498 case IEMMODE_32BIT:
6499 {
6500 Assert(uNewRip <= UINT32_MAX);
6501 Assert(pCtx->rip <= UINT32_MAX);
6502 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6503
6504 if (uNewRip > pCtx->cs.u32Limit)
6505 return iemRaiseGeneralProtectionFault0(pVCpu);
6506 pCtx->rip = uNewRip;
6507 break;
6508 }
6509
6510 case IEMMODE_64BIT:
6511 {
6512 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6513
6514 if (!IEM_IS_CANONICAL(uNewRip))
6515 return iemRaiseGeneralProtectionFault0(pVCpu);
6516 pCtx->rip = uNewRip;
6517 break;
6518 }
6519
6520 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6521 }
6522
6523 pCtx->eflags.Bits.u1RF = 0;
6524
6525#ifndef IEM_WITH_CODE_TLB
6526 /* Flush the prefetch buffer. */
6527 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6528#endif
6529
6530 return VINF_SUCCESS;
6531}
6532
6533
6534/**
6535 * Gets the address of the top of the stack.
6536 *
6537 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6538 * @param pCtx The CPU context from which SP/ESP/RSP should be
6539 * read.
6540 */
6541DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6542{
6543 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6544 return pCtx->rsp;
6545 if (pCtx->ss.Attr.n.u1DefBig)
6546 return pCtx->esp;
6547 return pCtx->sp;
6548}
6549
6550
6551/**
6552 * Updates the RIP/EIP/IP to point to the next instruction.
6553 *
6554 * This function leaves the EFLAGS.RF flag alone.
6555 *
6556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6557 * @param cbInstr The number of bytes to add.
6558 */
6559IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6560{
6561 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6562 switch (pVCpu->iem.s.enmCpuMode)
6563 {
6564 case IEMMODE_16BIT:
6565 Assert(pCtx->rip <= UINT16_MAX);
6566 pCtx->eip += cbInstr;
6567 pCtx->eip &= UINT32_C(0xffff);
6568 break;
6569
6570 case IEMMODE_32BIT:
6571 pCtx->eip += cbInstr;
6572 Assert(pCtx->rip <= UINT32_MAX);
6573 break;
6574
6575 case IEMMODE_64BIT:
6576 pCtx->rip += cbInstr;
6577 break;
6578 default: AssertFailed();
6579 }
6580}
6581
6582
6583#if 0
6584/**
6585 * Updates the RIP/EIP/IP to point to the next instruction.
6586 *
6587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6588 */
6589IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6590{
6591 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6592}
6593#endif
6594
6595
6596
6597/**
6598 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6599 *
6600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6601 * @param cbInstr The number of bytes to add.
6602 */
6603IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6604{
6605 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6606
6607 pCtx->eflags.Bits.u1RF = 0;
6608
6609 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6610#if ARCH_BITS >= 64
6611 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6612 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6613 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6614#else
6615 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6616 pCtx->rip += cbInstr;
6617 else
6618 pCtx->eip += cbInstr;
6619#endif
6620}
6621
6622
6623/**
6624 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6625 *
6626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6627 */
6628IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6629{
6630 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6631}
6632
6633
6634/**
6635 * Adds to the stack pointer.
6636 *
6637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6638 * @param pCtx The CPU context in which SP/ESP/RSP should be
6639 * updated.
6640 * @param cbToAdd The number of bytes to add (8-bit!).
6641 */
6642DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6643{
6644 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6645 pCtx->rsp += cbToAdd;
6646 else if (pCtx->ss.Attr.n.u1DefBig)
6647 pCtx->esp += cbToAdd;
6648 else
6649 pCtx->sp += cbToAdd;
6650}
6651
6652
6653/**
6654 * Subtracts from the stack pointer.
6655 *
6656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6657 * @param pCtx The CPU context in which SP/ESP/RSP should be
6658 * updated.
6659 * @param cbToSub The number of bytes to subtract (8-bit!).
6660 */
6661DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6662{
6663 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6664 pCtx->rsp -= cbToSub;
6665 else if (pCtx->ss.Attr.n.u1DefBig)
6666 pCtx->esp -= cbToSub;
6667 else
6668 pCtx->sp -= cbToSub;
6669}
6670
6671
6672/**
6673 * Adds to the temporary stack pointer.
6674 *
6675 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6676 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6677 * @param cbToAdd The number of bytes to add (16-bit).
6678 * @param pCtx Where to get the current stack mode.
6679 */
6680DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6681{
6682 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6683 pTmpRsp->u += cbToAdd;
6684 else if (pCtx->ss.Attr.n.u1DefBig)
6685 pTmpRsp->DWords.dw0 += cbToAdd;
6686 else
6687 pTmpRsp->Words.w0 += cbToAdd;
6688}
6689
6690
6691/**
6692 * Subtracts from the temporary stack pointer.
6693 *
6694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6695 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6696 * @param cbToSub The number of bytes to subtract.
6697 * @param pCtx Where to get the current stack mode.
6698 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6699 * expecting that.
6700 */
6701DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6702{
6703 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6704 pTmpRsp->u -= cbToSub;
6705 else if (pCtx->ss.Attr.n.u1DefBig)
6706 pTmpRsp->DWords.dw0 -= cbToSub;
6707 else
6708 pTmpRsp->Words.w0 -= cbToSub;
6709}
6710
6711
6712/**
6713 * Calculates the effective stack address for a push of the specified size as
6714 * well as the new RSP value (upper bits may be masked).
6715 *
6716 * @returns Effective stack address for the push.
6717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6718 * @param pCtx Where to get the current stack mode.
6719 * @param cbItem The size of the stack item to push.
6720 * @param puNewRsp Where to return the new RSP value.
6721 */
6722DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6723{
6724 RTUINT64U uTmpRsp;
6725 RTGCPTR GCPtrTop;
6726 uTmpRsp.u = pCtx->rsp;
6727
6728 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6729 GCPtrTop = uTmpRsp.u -= cbItem;
6730 else if (pCtx->ss.Attr.n.u1DefBig)
6731 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6732 else
6733 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6734 *puNewRsp = uTmpRsp.u;
6735 return GCPtrTop;
6736}
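/*
 * Illustrative sketch (not part of the build): with a 16-bit stack
 * (SS.Attr.n.u1DefBig = 0) and SP = 0x0002, pushing a 4 byte item yields an
 * effective address of 0xFFFE because the 16-bit subtraction wraps, while the
 * upper bits of the value returned in *puNewRsp are left untouched.
 */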
6737
6738
6739/**
6740 * Gets the current stack pointer and calculates the value after a pop of the
6741 * specified size.
6742 *
6743 * @returns Current stack pointer.
6744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6745 * @param pCtx Where to get the current stack mode.
6746 * @param cbItem The size of the stack item to pop.
6747 * @param puNewRsp Where to return the new RSP value.
6748 */
6749DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6750{
6751 RTUINT64U uTmpRsp;
6752 RTGCPTR GCPtrTop;
6753 uTmpRsp.u = pCtx->rsp;
6754
6755 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6756 {
6757 GCPtrTop = uTmpRsp.u;
6758 uTmpRsp.u += cbItem;
6759 }
6760 else if (pCtx->ss.Attr.n.u1DefBig)
6761 {
6762 GCPtrTop = uTmpRsp.DWords.dw0;
6763 uTmpRsp.DWords.dw0 += cbItem;
6764 }
6765 else
6766 {
6767 GCPtrTop = uTmpRsp.Words.w0;
6768 uTmpRsp.Words.w0 += cbItem;
6769 }
6770 *puNewRsp = uTmpRsp.u;
6771 return GCPtrTop;
6772}
6773
6774
6775/**
6776 * Calculates the effective stack address for a push of the specified size as
6777 * well as the new temporary RSP value (upper bits may be masked).
6778 *
6779 * @returns Effective stack address for the push.
6780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6781 * @param pCtx Where to get the current stack mode.
6782 * @param pTmpRsp The temporary stack pointer. This is updated.
6783 * @param cbItem The size of the stack item to push.
6784 */
6785DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6786{
6787 RTGCPTR GCPtrTop;
6788
6789 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6790 GCPtrTop = pTmpRsp->u -= cbItem;
6791 else if (pCtx->ss.Attr.n.u1DefBig)
6792 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6793 else
6794 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6795 return GCPtrTop;
6796}
6797
6798
6799/**
6800 * Gets the effective stack address for a pop of the specified size and
6801 * calculates and updates the temporary RSP.
6802 *
6803 * @returns Current stack pointer.
6804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6805 * @param pCtx Where to get the current stack mode.
6806 * @param pTmpRsp The temporary stack pointer. This is updated.
6807 * @param cbItem The size of the stack item to pop.
6808 */
6809DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6810{
6811 RTGCPTR GCPtrTop;
6812 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6813 {
6814 GCPtrTop = pTmpRsp->u;
6815 pTmpRsp->u += cbItem;
6816 }
6817 else if (pCtx->ss.Attr.n.u1DefBig)
6818 {
6819 GCPtrTop = pTmpRsp->DWords.dw0;
6820 pTmpRsp->DWords.dw0 += cbItem;
6821 }
6822 else
6823 {
6824 GCPtrTop = pTmpRsp->Words.w0;
6825 pTmpRsp->Words.w0 += cbItem;
6826 }
6827 return GCPtrTop;
6828}
6829
6830/** @} */
6831
6832
6833/** @name FPU access and helpers.
6834 *
6835 * @{
6836 */
6837
6838
6839/**
6840 * Hook for preparing to use the host FPU.
6841 *
6842 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6843 *
6844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6845 */
6846DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6847{
6848#ifdef IN_RING3
6849 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6850#else
6851 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6852#endif
6853}
6854
6855
6856/**
6857 * Hook for preparing to use the host FPU for SSE.
6858 *
6859 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6860 *
6861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6862 */
6863DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6864{
6865 iemFpuPrepareUsage(pVCpu);
6866}
6867
6868
6869/**
6870 * Hook for preparing to use the host FPU for AVX.
6871 *
6872 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6873 *
6874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6875 */
6876DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6877{
6878 iemFpuPrepareUsage(pVCpu);
6879}
6880
6881
6882/**
6883 * Hook for actualizing the guest FPU state before the interpreter reads it.
6884 *
6885 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6886 *
6887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6888 */
6889DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6890{
6891#ifdef IN_RING3
6892 NOREF(pVCpu);
6893#else
6894 CPUMRZFpuStateActualizeForRead(pVCpu);
6895#endif
6896}
6897
6898
6899/**
6900 * Hook for actualizing the guest FPU state before the interpreter changes it.
6901 *
6902 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6903 *
6904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6905 */
6906DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6907{
6908#ifdef IN_RING3
6909 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6910#else
6911 CPUMRZFpuStateActualizeForChange(pVCpu);
6912#endif
6913}
6914
6915
6916/**
6917 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6918 * only.
6919 *
6920 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6921 *
6922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6923 */
6924DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6925{
6926#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6927 NOREF(pVCpu);
6928#else
6929 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6930#endif
6931}
6932
6933
6934/**
6935 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6936 * read+write.
6937 *
6938 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6939 *
6940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6941 */
6942DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6943{
6944#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6945 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6946#else
6947 CPUMRZFpuStateActualizeForChange(pVCpu);
6948#endif
6949}
6950
6951
6952/**
6953 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6954 * only.
6955 *
6956 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6957 *
6958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6959 */
6960DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6961{
6962#ifdef IN_RING3
6963 NOREF(pVCpu);
6964#else
6965 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6966#endif
6967}
6968
6969
6970/**
6971 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6972 * read+write.
6973 *
6974 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6975 *
6976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6977 */
6978DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
6979{
6980#ifdef IN_RING3
6981 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6982#else
6983 CPUMRZFpuStateActualizeForChange(pVCpu);
6984#endif
6985}
6986
6987
6988/**
6989 * Stores a QNaN value into a FPU register.
6990 *
6991 * @param pReg Pointer to the register.
6992 */
6993DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6994{
6995 pReg->au32[0] = UINT32_C(0x00000000);
6996 pReg->au32[1] = UINT32_C(0xc0000000);
6997 pReg->au16[4] = UINT16_C(0xffff);
6998}
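/*
 * Illustrative note: the three stores above build the x87 "real indefinite"
 * QNaN - sign = 1, exponent = 0x7fff (au16[4] = 0xffff) and a mantissa of
 * 0xc000000000000000 (au32[1] = 0xc0000000, au32[0] = 0).
 */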
6999
7000
7001/**
7002 * Updates the FOP, FPU.CS and FPUIP registers.
7003 *
7004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7005 * @param pCtx The CPU context.
7006 * @param pFpuCtx The FPU context.
7007 */
7008DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
7009{
7010 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7011 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7012 /** @todo x87.CS and FPUIP need to be kept separately. */
7013 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7014 {
7015 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7016 * happens in real mode here based on the fnsave and fnstenv images. */
7017 pFpuCtx->CS = 0;
7018 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
7019 }
7020 else
7021 {
7022 pFpuCtx->CS = pCtx->cs.Sel;
7023 pFpuCtx->FPUIP = pCtx->rip;
7024 }
7025}
7026
7027
7028/**
7029 * Updates the x87.DS and FPUDP registers.
7030 *
7031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7032 * @param pCtx The CPU context.
7033 * @param pFpuCtx The FPU context.
7034 * @param iEffSeg The effective segment register.
7035 * @param GCPtrEff The effective address relative to @a iEffSeg.
7036 */
7037DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7038{
7039 RTSEL sel;
7040 switch (iEffSeg)
7041 {
7042 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
7043 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
7044 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
7045 case X86_SREG_ES: sel = pCtx->es.Sel; break;
7046 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
7047 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
7048 default:
7049 AssertMsgFailed(("%d\n", iEffSeg));
7050 sel = pCtx->ds.Sel;
7051 }
7052 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7053 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7054 {
7055 pFpuCtx->DS = 0;
7056 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7057 }
7058 else
7059 {
7060 pFpuCtx->DS = sel;
7061 pFpuCtx->FPUDP = GCPtrEff;
7062 }
7063}
7064
7065
7066/**
7067 * Rotates the stack registers in the push direction.
7068 *
7069 * @param pFpuCtx The FPU context.
7070 * @remarks This is a complete waste of time, but fxsave stores the registers in
7071 * stack order.
7072 */
7073DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7074{
7075 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7076 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7077 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7078 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7079 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7080 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7081 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7082 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7083 pFpuCtx->aRegs[0].r80 = r80Tmp;
7084}
7085
7086
7087/**
7088 * Rotates the stack registers in the pop direction.
7089 *
7090 * @param pFpuCtx The FPU context.
7091 * @remarks This is a complete waste of time, but fxsave stores the registers in
7092 * stack order.
7093 */
7094DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7095{
7096 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7097 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7098 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7099 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7100 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7101 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7102 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7103 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7104 pFpuCtx->aRegs[7].r80 = r80Tmp;
7105}
7106
7107
7108/**
7109 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7110 * exception prevents it.
7111 *
7112 * @param pResult The FPU operation result to push.
7113 * @param pFpuCtx The FPU context.
7114 */
7115IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7116{
7117 /* Update FSW and bail if there are pending exceptions afterwards. */
7118 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7119 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7120 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7121 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7122 {
7123 pFpuCtx->FSW = fFsw;
7124 return;
7125 }
7126
7127 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7128 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7129 {
7130 /* All is fine, push the actual value. */
7131 pFpuCtx->FTW |= RT_BIT(iNewTop);
7132 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7133 }
7134 else if (pFpuCtx->FCW & X86_FCW_IM)
7135 {
7136 /* Masked stack overflow, push QNaN. */
7137 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7138 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7139 }
7140 else
7141 {
7142 /* Raise stack overflow, don't push anything. */
7143 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7144 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7145 return;
7146 }
7147
7148 fFsw &= ~X86_FSW_TOP_MASK;
7149 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7150 pFpuCtx->FSW = fFsw;
7151
7152 iemFpuRotateStackPush(pFpuCtx);
7153}
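/*
 * Illustrative note: (TOP + 7) & 7 above is TOP - 1 modulo 8, i.e. the slot a
 * push lands in.  The value is written to aRegs[7] and iemFpuRotateStackPush
 * then rotates the array so it becomes the new ST(0) in aRegs[0], keeping
 * aRegs[] in stack order as fxsave expects.
 */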
7154
7155
7156/**
7157 * Stores a result in a FPU register and updates the FSW and FTW.
7158 *
7159 * @param pFpuCtx The FPU context.
7160 * @param pResult The result to store.
7161 * @param iStReg Which FPU register to store it in.
7162 */
7163IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7164{
7165 Assert(iStReg < 8);
7166 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7167 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7168 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7169 pFpuCtx->FTW |= RT_BIT(iReg);
7170 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7171}
7172
7173
7174/**
7175 * Only updates the FPU status word (FSW) with the result of the current
7176 * instruction.
7177 *
7178 * @param pFpuCtx The FPU context.
7179 * @param u16FSW The FSW output of the current instruction.
7180 */
7181IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7182{
7183 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7184 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7185}
7186
7187
7188/**
7189 * Pops one item off the FPU stack if no pending exception prevents it.
7190 *
7191 * @param pFpuCtx The FPU context.
7192 */
7193IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7194{
7195 /* Check pending exceptions. */
7196 uint16_t uFSW = pFpuCtx->FSW;
7197 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7198 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7199 return;
7200
7201 /* Pop one item: TOP++ (modulo 8). */
7202 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7203 uFSW &= ~X86_FSW_TOP_MASK;
7204 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7205 pFpuCtx->FSW = uFSW;
7206
7207 /* Mark the previous ST0 as empty. */
7208 iOldTop >>= X86_FSW_TOP_SHIFT;
7209 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7210
7211 /* Rotate the registers. */
7212 iemFpuRotateStackPop(pFpuCtx);
7213}
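/*
 * Illustrative note: (iOldTop + (9 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK
 * is modulo-8 arithmetic on the 3-bit TOP field, so the net effect is
 * TOP = (TOP + 1) & 7, matching what the hardware does when popping one item.
 */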
7214
7215
7216/**
7217 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7218 *
7219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7220 * @param pResult The FPU operation result to push.
7221 */
7222IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7223{
7224 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7225 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7226 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7227 iemFpuMaybePushResult(pResult, pFpuCtx);
7228}
7229
7230
7231/**
7232 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7233 * and sets FPUDP and FPUDS.
7234 *
7235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7236 * @param pResult The FPU operation result to push.
7237 * @param iEffSeg The effective segment register.
7238 * @param GCPtrEff The effective address relative to @a iEffSeg.
7239 */
7240IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7241{
7242 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7243 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7244 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7245 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7246 iemFpuMaybePushResult(pResult, pFpuCtx);
7247}
7248
7249
7250/**
7251 * Replace ST0 with the first value and push the second onto the FPU stack,
7252 * unless a pending exception prevents it.
7253 *
7254 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7255 * @param pResult The FPU operation result to store and push.
7256 */
7257IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7258{
7259 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7260 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7261 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7262
7263 /* Update FSW and bail if there are pending exceptions afterwards. */
7264 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7265 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7266 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7267 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7268 {
7269 pFpuCtx->FSW = fFsw;
7270 return;
7271 }
7272
7273 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7274 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7275 {
7276 /* All is fine, push the actual value. */
7277 pFpuCtx->FTW |= RT_BIT(iNewTop);
7278 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7279 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7280 }
7281 else if (pFpuCtx->FCW & X86_FCW_IM)
7282 {
7283 /* Masked stack overflow, push QNaN. */
7284 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7285 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7286 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7287 }
7288 else
7289 {
7290 /* Raise stack overflow, don't push anything. */
7291 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7292 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7293 return;
7294 }
7295
7296 fFsw &= ~X86_FSW_TOP_MASK;
7297 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7298 pFpuCtx->FSW = fFsw;
7299
7300 iemFpuRotateStackPush(pFpuCtx);
7301}
7302
7303
7304/**
7305 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7306 * FOP.
7307 *
7308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7309 * @param pResult The result to store.
7310 * @param iStReg Which FPU register to store it in.
7311 */
7312IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7313{
7314 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7315 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7316 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7317 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7318}
7319
7320
7321/**
7322 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7323 * FOP, and then pops the stack.
7324 *
7325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7326 * @param pResult The result to store.
7327 * @param iStReg Which FPU register to store it in.
7328 */
7329IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7330{
7331 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7332 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7333 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7334 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7335 iemFpuMaybePopOne(pFpuCtx);
7336}
7337
7338
7339/**
7340 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7341 * FPUDP, and FPUDS.
7342 *
7343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7344 * @param pResult The result to store.
7345 * @param iStReg Which FPU register to store it in.
7346 * @param iEffSeg The effective memory operand selector register.
7347 * @param GCPtrEff The effective memory operand offset.
7348 */
7349IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7350 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7351{
7352 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7353 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7354 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7355 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7356 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7357}
7358
7359
7360/**
7361 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7362 * FPUDP, and FPUDS, and then pops the stack.
7363 *
7364 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7365 * @param pResult The result to store.
7366 * @param iStReg Which FPU register to store it in.
7367 * @param iEffSeg The effective memory operand selector register.
7368 * @param GCPtrEff The effective memory operand offset.
7369 */
7370IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7371 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7372{
7373 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7374 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7375 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7376 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7377 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7378 iemFpuMaybePopOne(pFpuCtx);
7379}
7380
7381
7382/**
7383 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7384 *
7385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7386 */
7387IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7388{
7389 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7390 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7391 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7392}
7393
7394
7395/**
7396 * Marks the specified stack register as free (for FFREE).
7397 *
7398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7399 * @param iStReg The register to free.
7400 */
7401IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7402{
7403 Assert(iStReg < 8);
7404 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7405 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7406 pFpuCtx->FTW &= ~RT_BIT(iReg);
7407}
7408
7409
7410/**
7411 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7412 *
7413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7414 */
7415IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7416{
7417 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7418 uint16_t uFsw = pFpuCtx->FSW;
7419 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7420 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7421 uFsw &= ~X86_FSW_TOP_MASK;
7422 uFsw |= uTop;
7423 pFpuCtx->FSW = uFsw;
7424}
7425
7426
7427/**
7428 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7429 *
7430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7431 */
7432IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7433{
7434 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7435 uint16_t uFsw = pFpuCtx->FSW;
7436 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7437 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7438 uFsw &= ~X86_FSW_TOP_MASK;
7439 uFsw |= uTop;
7440 pFpuCtx->FSW = uFsw;
7441}
7442
7443
7444/**
7445 * Updates the FSW, FOP, FPUIP, and FPUCS.
7446 *
7447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7448 * @param u16FSW The FSW from the current instruction.
7449 */
7450IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7451{
7452 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7453 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7454 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7455 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7456}
7457
7458
7459/**
7460 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7461 *
7462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7463 * @param u16FSW The FSW from the current instruction.
7464 */
7465IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7466{
7467 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7468 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7469 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7470 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7471 iemFpuMaybePopOne(pFpuCtx);
7472}
7473
7474
7475/**
7476 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7477 *
7478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7479 * @param u16FSW The FSW from the current instruction.
7480 * @param iEffSeg The effective memory operand selector register.
7481 * @param GCPtrEff The effective memory operand offset.
7482 */
7483IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7484{
7485 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7486 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7487 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7488 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7489 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7490}
7491
7492
7493/**
7494 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7495 *
7496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7497 * @param u16FSW The FSW from the current instruction.
7498 */
7499IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7500{
7501 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7502 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7503 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7504 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7505 iemFpuMaybePopOne(pFpuCtx);
7506 iemFpuMaybePopOne(pFpuCtx);
7507}
7508
7509
7510/**
7511 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7512 *
7513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7514 * @param u16FSW The FSW from the current instruction.
7515 * @param iEffSeg The effective memory operand selector register.
7516 * @param GCPtrEff The effective memory operand offset.
7517 */
7518IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7519{
7520 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7521 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7522 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7523 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7524 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7525 iemFpuMaybePopOne(pFpuCtx);
7526}
7527
7528
7529/**
7530 * Worker routine for raising an FPU stack underflow exception.
7531 *
7532 * @param pFpuCtx The FPU context.
7533 * @param iStReg The stack register being accessed.
7534 */
7535IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7536{
7537 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7538 if (pFpuCtx->FCW & X86_FCW_IM)
7539 {
7540 /* Masked underflow. */
7541 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7542 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7543 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7544 if (iStReg != UINT8_MAX)
7545 {
7546 pFpuCtx->FTW |= RT_BIT(iReg);
7547 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7548 }
7549 }
7550 else
7551 {
7552 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7553 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7554 }
7555}
7556
7557
7558/**
7559 * Raises a FPU stack underflow exception.
7560 *
7561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7562 * @param iStReg The destination register that should be loaded
7563 * with QNaN if \#IS is masked. Specify
7564 * UINT8_MAX if none (like for fcom).
7565 */
7566DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7567{
7568 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7569 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7570 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7571 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7572}
7573
7574
7575DECL_NO_INLINE(IEM_STATIC, void)
7576iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7577{
7578 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7579 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7580 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7581 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7582 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7583}
7584
7585
7586DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7587{
7588 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7589 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7590 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7591 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7592 iemFpuMaybePopOne(pFpuCtx);
7593}
7594
7595
7596DECL_NO_INLINE(IEM_STATIC, void)
7597iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7598{
7599 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7600 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7601 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7602 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7603 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7604 iemFpuMaybePopOne(pFpuCtx);
7605}
7606
7607
7608DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7609{
7610 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7611 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7612 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7613 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7614 iemFpuMaybePopOne(pFpuCtx);
7615 iemFpuMaybePopOne(pFpuCtx);
7616}
7617
7618
7619DECL_NO_INLINE(IEM_STATIC, void)
7620iemFpuStackPushUnderflow(PVMCPU pVCpu)
7621{
7622 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7623 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7624 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7625
7626 if (pFpuCtx->FCW & X86_FCW_IM)
7627 {
7628 /* Masked underflow - push QNaN. */
7629 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7630 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7631 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7632 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7633 pFpuCtx->FTW |= RT_BIT(iNewTop);
7634 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7635 iemFpuRotateStackPush(pFpuCtx);
7636 }
7637 else
7638 {
7639 /* Exception pending - don't change TOP or the register stack. */
7640 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7641 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7642 }
7643}
7644
7645
7646DECL_NO_INLINE(IEM_STATIC, void)
7647iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7648{
7649 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7650 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7651 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7652
7653 if (pFpuCtx->FCW & X86_FCW_IM)
7654 {
7655 /* Masked underflow - push QNaN. */
7656 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7657 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7658 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7659 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7660 pFpuCtx->FTW |= RT_BIT(iNewTop);
7661 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7662 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7663 iemFpuRotateStackPush(pFpuCtx);
7664 }
7665 else
7666 {
7667 /* Exception pending - don't change TOP or the register stack. */
7668 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7669 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7670 }
7671}
7672
7673
7674/**
7675 * Worker routine for raising an FPU stack overflow exception on a push.
7676 *
7677 * @param pFpuCtx The FPU context.
7678 */
7679IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7680{
7681 if (pFpuCtx->FCW & X86_FCW_IM)
7682 {
7683 /* Masked overflow. */
7684 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7685 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7686 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7687 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7688 pFpuCtx->FTW |= RT_BIT(iNewTop);
7689 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7690 iemFpuRotateStackPush(pFpuCtx);
7691 }
7692 else
7693 {
7694 /* Exception pending - don't change TOP or the register stack. */
7695 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7696 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7697 }
7698}
7699
7700
7701/**
7702 * Raises a FPU stack overflow exception on a push.
7703 *
7704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7705 */
7706DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7707{
7708 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7709 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7710 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7711 iemFpuStackPushOverflowOnly(pFpuCtx);
7712}
7713
7714
7715/**
7716 * Raises a FPU stack overflow exception on a push with a memory operand.
7717 *
7718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7719 * @param iEffSeg The effective memory operand selector register.
7720 * @param GCPtrEff The effective memory operand offset.
7721 */
7722DECL_NO_INLINE(IEM_STATIC, void)
7723iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7724{
7725 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7726 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7727 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7728 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7729 iemFpuStackPushOverflowOnly(pFpuCtx);
7730}
7731
7732
7733IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7734{
7735 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7736 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7737 if (pFpuCtx->FTW & RT_BIT(iReg))
7738 return VINF_SUCCESS;
7739 return VERR_NOT_FOUND;
7740}
7741
7742
7743IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7744{
7745 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7746 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7747 if (pFpuCtx->FTW & RT_BIT(iReg))
7748 {
7749 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7750 return VINF_SUCCESS;
7751 }
7752 return VERR_NOT_FOUND;
7753}
7754
7755
7756IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7757 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7758{
7759 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7760 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7761 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7762 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7763 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7764 {
7765 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7766 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7767 return VINF_SUCCESS;
7768 }
7769 return VERR_NOT_FOUND;
7770}
7771
7772
7773IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7774{
7775 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7776 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7777 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7778 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7779 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7780 {
7781 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7782 return VINF_SUCCESS;
7783 }
7784 return VERR_NOT_FOUND;
7785}
7786
7787
7788/**
7789 * Updates the FPU exception status after FCW is changed.
7790 *
7791 * @param pFpuCtx The FPU context.
7792 */
7793IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7794{
7795 uint16_t u16Fsw = pFpuCtx->FSW;
7796 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7797 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7798 else
7799 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7800 pFpuCtx->FSW = u16Fsw;
7801}
7802
7803
7804/**
7805 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7806 *
7807 * @returns The full FTW.
7808 * @param pFpuCtx The FPU context.
7809 */
7810IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7811{
7812 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7813 uint16_t u16Ftw = 0;
7814 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7815 for (unsigned iSt = 0; iSt < 8; iSt++)
7816 {
7817 unsigned const iReg = (iSt + iTop) & 7;
7818 if (!(u8Ftw & RT_BIT(iReg)))
7819 u16Ftw |= 3 << (iReg * 2); /* empty */
7820 else
7821 {
7822 uint16_t uTag;
7823 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7824 if (pr80Reg->s.uExponent == 0x7fff)
7825 uTag = 2; /* Exponent is all 1's => Special. */
7826 else if (pr80Reg->s.uExponent == 0x0000)
7827 {
7828 if (pr80Reg->s.u64Mantissa == 0x0000)
7829 uTag = 1; /* All bits are zero => Zero. */
7830 else
7831 uTag = 2; /* Must be special. */
7832 }
7833 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7834 uTag = 0; /* Valid. */
7835 else
7836 uTag = 2; /* Must be special. */
7837
7838 u16Ftw |= uTag << (iReg * 2);
7839 }
7840 }
7841
7842 return u16Ftw;
7843}
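/*
 * Illustrative sketch (not part of the build): the full FTW uses two bits per
 * physical register (0 = valid, 1 = zero, 2 = special, 3 = empty).  With
 * TOP = 7 and only ST(0) holding a normal value such as 1.0, register 7 gets
 * tag 0 and registers 0..6 get tag 3, giving a full FTW of 0x3fff.
 */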
7844
7845
7846/**
7847 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7848 *
7849 * @returns The compressed FTW.
7850 * @param u16FullFtw The full FTW to convert.
7851 */
7852IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7853{
7854 uint8_t u8Ftw = 0;
7855 for (unsigned i = 0; i < 8; i++)
7856 {
7857 if ((u16FullFtw & 3) != 3 /*empty*/)
7858 u8Ftw |= RT_BIT(i);
7859 u16FullFtw >>= 2;
7860 }
7861
7862 return u8Ftw;
7863}
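/*
 * Illustrative sketch (not part of the build): compressing the 0x3fff example
 * above yields 0x80 - registers 0..6 carry tag 3 (empty) and stay clear,
 * while register 7 carries tag 0 (valid) and gets its bit set.
 */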
7864
7865/** @} */
7866
7867
7868/** @name Memory access.
7869 *
7870 * @{
7871 */
7872
7873
7874/**
7875 * Updates the IEMCPU::cbWritten counter if applicable.
7876 *
7877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7878 * @param fAccess The access being accounted for.
7879 * @param cbMem The access size.
7880 */
7881DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7882{
7883 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7884 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7885 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7886}
7887
7888
7889/**
7890 * Checks if the given segment can be written to, raise the appropriate
7891 * exception if not.
7892 *
7893 * @returns VBox strict status code.
7894 *
7895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7896 * @param pHid Pointer to the hidden register.
7897 * @param iSegReg The register number.
7898 * @param pu64BaseAddr Where to return the base address to use for the
7899 * segment. (In 64-bit code it may differ from the
7900 * base in the hidden segment.)
7901 */
7902IEM_STATIC VBOXSTRICTRC
7903iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7904{
7905 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7906 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7907 else
7908 {
7909 if (!pHid->Attr.n.u1Present)
7910 {
7911 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7912 AssertRelease(uSel == 0);
7913 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7914 return iemRaiseGeneralProtectionFault0(pVCpu);
7915 }
7916
7917 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7918 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7919 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7920 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7921 *pu64BaseAddr = pHid->u64Base;
7922 }
7923 return VINF_SUCCESS;
7924}
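/*
 * Illustrative note: in 64-bit mode only FS and GS contribute a non-zero base
 * (iSegReg < X86_SREG_FS yields 0 above); CS, DS, ES and SS are treated as
 * flat and no present/writable checks are performed on them.
 */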
7925
7926
7927/**
7928 * Checks if the given segment can be read from, raise the appropriate
7929 * exception if not.
7930 *
7931 * @returns VBox strict status code.
7932 *
7933 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7934 * @param pHid Pointer to the hidden register.
7935 * @param iSegReg The register number.
7936 * @param pu64BaseAddr Where to return the base address to use for the
7937 * segment. (In 64-bit code it may differ from the
7938 * base in the hidden segment.)
7939 */
7940IEM_STATIC VBOXSTRICTRC
7941iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7942{
7943 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7944 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7945 else
7946 {
7947 if (!pHid->Attr.n.u1Present)
7948 {
7949 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7950 AssertRelease(uSel == 0);
7951 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7952 return iemRaiseGeneralProtectionFault0(pVCpu);
7953 }
7954
7955 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7956 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7957 *pu64BaseAddr = pHid->u64Base;
7958 }
7959 return VINF_SUCCESS;
7960}
7961
7962
7963/**
7964 * Applies the segment limit, base and attributes.
7965 *
7966 * This may raise a \#GP or \#SS.
7967 *
7968 * @returns VBox strict status code.
7969 *
7970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7971 * @param fAccess The kind of access which is being performed.
7972 * @param iSegReg The index of the segment register to apply.
7973 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7974 * TSS, ++).
7975 * @param cbMem The access size.
7976 * @param pGCPtrMem Pointer to the guest memory address to apply
7977 * segmentation to. Input and output parameter.
7978 */
7979IEM_STATIC VBOXSTRICTRC
7980iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7981{
7982 if (iSegReg == UINT8_MAX)
7983 return VINF_SUCCESS;
7984
7985 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7986 switch (pVCpu->iem.s.enmCpuMode)
7987 {
7988 case IEMMODE_16BIT:
7989 case IEMMODE_32BIT:
7990 {
7991 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7992 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7993
7994 if ( pSel->Attr.n.u1Present
7995 && !pSel->Attr.n.u1Unusable)
7996 {
7997 Assert(pSel->Attr.n.u1DescType);
7998 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7999 {
8000 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8001 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8002 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8003
8004 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8005 {
8006 /** @todo CPL check. */
8007 }
8008
8009 /*
8010 * There are two kinds of data selectors, normal and expand down.
8011 */
8012 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8013 {
8014 if ( GCPtrFirst32 > pSel->u32Limit
8015 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8016 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8017 }
8018 else
8019 {
8020 /*
8021 * The upper boundary is defined by the B bit, not the G bit!
8022 */
8023 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8024 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8025 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8026 }
8027 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8028 }
8029 else
8030 {
8031
8032 /*
8033 * A code selector can usually be used to read through it; writing is
8034 * only permitted in real and V8086 mode.
8035 */
8036 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8037 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8038 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8039 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8040 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8041
8042 if ( GCPtrFirst32 > pSel->u32Limit
8043 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8044 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8045
8046 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8047 {
8048 /** @todo CPL check. */
8049 }
8050
8051 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8052 }
8053 }
8054 else
8055 return iemRaiseGeneralProtectionFault0(pVCpu);
8056 return VINF_SUCCESS;
8057 }
8058
8059 case IEMMODE_64BIT:
8060 {
8061 RTGCPTR GCPtrMem = *pGCPtrMem;
8062 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8063 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8064
8065 Assert(cbMem >= 1);
8066 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8067 return VINF_SUCCESS;
8068 return iemRaiseGeneralProtectionFault0(pVCpu);
8069 }
8070
8071 default:
8072 AssertFailedReturn(VERR_IEM_IPE_7);
8073 }
8074}
8075
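/*
 * Illustrative note (added commentary): for the expand-down case above the
 * valid offset range is (limit, upper bound]. E.g. with u32Limit=0x0fff and
 * the B bit set, offsets 0x1000 thru 0xffffffff are accepted, while anything
 * at or below 0x0fff raises the selector bounds exception.
 */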
8076
8077/**
8078 * Translates a virtual address to a physical address and checks if we
8079 * can access the page as specified.
8080 *
 * @returns VBox strict status code.
 *
8081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8082 * @param GCPtrMem The virtual address.
8083 * @param fAccess The intended access.
8084 * @param pGCPhysMem Where to return the physical address.
8085 */
8086IEM_STATIC VBOXSTRICTRC
8087iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8088{
8089 /** @todo Need a different PGM interface here. We're currently using
8090 * generic / REM interfaces. This won't cut it for R0 & RC. */
8091 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8092 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8093 RTGCPHYS GCPhys;
8094 uint64_t fFlags;
8095 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8096 if (RT_FAILURE(rc))
8097 {
8098 /** @todo Check unassigned memory in unpaged mode. */
8099 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8100 *pGCPhysMem = NIL_RTGCPHYS;
8101 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8102 }
8103
8104 /* If the page is user-accessible, writable and does not have the no-exec
8105 bit set, all access is allowed. Otherwise we'll have to check more carefully... */
8106 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8107 {
8108 /* Write to read only memory? */
8109 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8110 && !(fFlags & X86_PTE_RW)
8111 && ( (pVCpu->iem.s.uCpl == 3
8112 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8113 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8114 {
8115 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8116 *pGCPhysMem = NIL_RTGCPHYS;
8117 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8118 }
8119
8120 /* Kernel memory accessed by userland? */
8121 if ( !(fFlags & X86_PTE_US)
8122 && pVCpu->iem.s.uCpl == 3
8123 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8124 {
8125 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8126 *pGCPhysMem = NIL_RTGCPHYS;
8127 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8128 }
8129
8130 /* Executing non-executable memory? */
8131 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8132 && (fFlags & X86_PTE_PAE_NX)
8133 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8134 {
8135 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8136 *pGCPhysMem = NIL_RTGCPHYS;
8137 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8138 VERR_ACCESS_DENIED);
8139 }
8140 }
8141
8142 /*
8143 * Set the dirty / access flags.
8144 * ASSUMES this is set when the address is translated rather than on commit...
8145 */
8146 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8147 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8148 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8149 {
8150 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8151 AssertRC(rc2);
8152 }
8153
8154 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8155 *pGCPhysMem = GCPhys;
8156 return VINF_SUCCESS;
8157}
8158
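/*
 * Illustrative note (added commentary): with the checks above, a CPL=3
 * non-system write to a page with X86_PTE_RW clear always faults (the CR0.WP
 * test only matters for supervisor writes), and a CPL=3 non-system access to
 * a page with X86_PTE_US clear faults as well. A ring-0 write to a read-only
 * page only faults when CR0.WP is set.
 */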
8159
8160
8161/**
8162 * Maps a physical page.
8163 *
8164 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8166 * @param GCPhysMem The physical address.
8167 * @param fAccess The intended access.
8168 * @param ppvMem Where to return the mapping address.
8169 * @param pLock The PGM lock.
8170 */
8171IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8172{
8173#ifdef IEM_VERIFICATION_MODE_FULL
8174 /* Force the alternative path so we can ignore writes. */
8175 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8176 {
8177 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8178 {
8179 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8180 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8181 if (RT_FAILURE(rc2))
8182 pVCpu->iem.s.fProblematicMemory = true;
8183 }
8184 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8185 }
8186#endif
8187#ifdef IEM_LOG_MEMORY_WRITES
8188 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8189 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8190#endif
8191#ifdef IEM_VERIFICATION_MODE_MINIMAL
8192 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8193#endif
8194
8195 /** @todo This API may require some improving later. A private deal with PGM
8196 * regarding locking and unlocking needs to be struck. A couple of TLBs
8197 * living in PGM, but with publicly accessible inlined access methods
8198 * could perhaps be an even better solution. */
8199 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8200 GCPhysMem,
8201 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8202 pVCpu->iem.s.fBypassHandlers,
8203 ppvMem,
8204 pLock);
8205 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8206 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8207
8208#ifdef IEM_VERIFICATION_MODE_FULL
8209 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8210 pVCpu->iem.s.fProblematicMemory = true;
8211#endif
8212 return rc;
8213}
8214
8215
8216/**
8217 * Unmaps a page previously mapped by iemMemPageMap.
8218 *
8219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8220 * @param GCPhysMem The physical address.
8221 * @param fAccess The intended access.
8222 * @param pvMem What iemMemPageMap returned.
8223 * @param pLock The PGM lock.
8224 */
8225DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8226{
8227 NOREF(pVCpu);
8228 NOREF(GCPhysMem);
8229 NOREF(fAccess);
8230 NOREF(pvMem);
8231 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8232}
8233
8234
8235/**
8236 * Looks up a memory mapping entry.
8237 *
8238 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8240 * @param pvMem The memory address.
8241 * @param fAccess The access (what and type bits) to match.
8242 */
8243DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8244{
8245 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8246 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8247 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8248 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8249 return 0;
8250 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8251 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8252 return 1;
8253 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8254 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8255 return 2;
8256 return VERR_NOT_FOUND;
8257}
8258
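/*
 * Illustrative note (added commentary): only the WHAT and TYPE bits take part
 * in the comparison above, so an entry recorded with e.g.
 * (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_BOUNCE_BUFFERED)
 * still matches a lookup done with just the data-write bits.
 */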
8259
8260/**
8261 * Finds a free memmap entry when using iNextMapping doesn't work.
8262 *
8263 * @returns Memory mapping index, 1024 on failure.
8264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8265 */
8266IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8267{
8268 /*
8269 * The easy case.
8270 */
8271 if (pVCpu->iem.s.cActiveMappings == 0)
8272 {
8273 pVCpu->iem.s.iNextMapping = 1;
8274 return 0;
8275 }
8276
8277 /* There should be enough mappings for all instructions. */
8278 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8279
8280 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8281 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8282 return i;
8283
8284 AssertFailedReturn(1024);
8285}
8286
8287
8288/**
8289 * Commits a bounce buffer that needs writing back and unmaps it.
8290 *
8291 * @returns Strict VBox status code.
8292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8293 * @param iMemMap The index of the buffer to commit.
8294 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8295 * Always false in ring-3, obviously.
8296 */
8297IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8298{
8299 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8300 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8301#ifdef IN_RING3
8302 Assert(!fPostponeFail);
8303 RT_NOREF_PV(fPostponeFail);
8304#endif
8305
8306 /*
8307 * Do the writing.
8308 */
8309#ifndef IEM_VERIFICATION_MODE_MINIMAL
8310 PVM pVM = pVCpu->CTX_SUFF(pVM);
8311 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8312 && !IEM_VERIFICATION_ENABLED(pVCpu))
8313 {
8314 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8315 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8316 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8317 if (!pVCpu->iem.s.fBypassHandlers)
8318 {
8319 /*
8320 * Carefully and efficiently dealing with access handler return
8321 * codes makes this a little bloated.
8322 */
8323 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8324 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8325 pbBuf,
8326 cbFirst,
8327 PGMACCESSORIGIN_IEM);
8328 if (rcStrict == VINF_SUCCESS)
8329 {
8330 if (cbSecond)
8331 {
8332 rcStrict = PGMPhysWrite(pVM,
8333 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8334 pbBuf + cbFirst,
8335 cbSecond,
8336 PGMACCESSORIGIN_IEM);
8337 if (rcStrict == VINF_SUCCESS)
8338 { /* nothing */ }
8339 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8340 {
8341 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8342 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8343 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8344 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8345 }
8346# ifndef IN_RING3
8347 else if (fPostponeFail)
8348 {
8349 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8350 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8351 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8352 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8353 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8354 return iemSetPassUpStatus(pVCpu, rcStrict);
8355 }
8356# endif
8357 else
8358 {
8359 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8360 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8361 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8362 return rcStrict;
8363 }
8364 }
8365 }
8366 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8367 {
8368 if (!cbSecond)
8369 {
8370 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8371 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8372 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8373 }
8374 else
8375 {
8376 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8377 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8378 pbBuf + cbFirst,
8379 cbSecond,
8380 PGMACCESSORIGIN_IEM);
8381 if (rcStrict2 == VINF_SUCCESS)
8382 {
8383 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8384 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8385 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8386 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8387 }
8388 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8389 {
8390 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8391 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8392 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8393 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8394 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8395 }
8396# ifndef IN_RING3
8397 else if (fPostponeFail)
8398 {
8399 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8400 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8401 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8402 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8403 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8404 return iemSetPassUpStatus(pVCpu, rcStrict);
8405 }
8406# endif
8407 else
8408 {
8409 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8410 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8411 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8412 return rcStrict2;
8413 }
8414 }
8415 }
8416# ifndef IN_RING3
8417 else if (fPostponeFail)
8418 {
8419 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8420 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8421 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8422 if (!cbSecond)
8423 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8424 else
8425 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8426 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8427 return iemSetPassUpStatus(pVCpu, rcStrict);
8428 }
8429# endif
8430 else
8431 {
8432 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8433 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8434 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8435 return rcStrict;
8436 }
8437 }
8438 else
8439 {
8440 /*
8441 * No access handlers, much simpler.
8442 */
8443 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8444 if (RT_SUCCESS(rc))
8445 {
8446 if (cbSecond)
8447 {
8448 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8449 if (RT_SUCCESS(rc))
8450 { /* likely */ }
8451 else
8452 {
8453 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8454 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8455 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8456 return rc;
8457 }
8458 }
8459 }
8460 else
8461 {
8462 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8463 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8464 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8465 return rc;
8466 }
8467 }
8468 }
8469#endif
8470
8471#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8472 /*
8473 * Record the write(s).
8474 */
8475 if (!pVCpu->iem.s.fNoRem)
8476 {
8477 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8478 if (pEvtRec)
8479 {
8480 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8481 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8482 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8483 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8484 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8485 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8486 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8487 }
8488 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8489 {
8490 pEvtRec = iemVerifyAllocRecord(pVCpu);
8491 if (pEvtRec)
8492 {
8493 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8494 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8495 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8496 memcpy(pEvtRec->u.RamWrite.ab,
8497 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8498 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8499 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8500 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8501 }
8502 }
8503 }
8504#endif
8505#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8506 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8507 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8508 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8509 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8510 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8511 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8512
8513 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8514 g_cbIemWrote = cbWrote;
8515 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8516#endif
8517
8518 /*
8519 * Free the mapping entry.
8520 */
8521 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8522 Assert(pVCpu->iem.s.cActiveMappings != 0);
8523 pVCpu->iem.s.cActiveMappings--;
8524 return VINF_SUCCESS;
8525}
8526
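/*
 * Illustrative note (added commentary): outside ring-3 with fPostponeFail set,
 * a failing PGMPhysWrite above does not fail the instruction; the affected
 * half is flagged via IEM_ACCESS_PENDING_R3_WRITE_1ST/2ND, VMCPU_FF_IEM is
 * set, and the postponed write is carried out once execution returns to
 * ring-3.
 */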
8527
8528/**
8529 * iemMemMap worker that deals with a request crossing pages.
8530 */
8531IEM_STATIC VBOXSTRICTRC
8532iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8533{
8534 /*
8535 * Do the address translations.
8536 */
8537 RTGCPHYS GCPhysFirst;
8538 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8539 if (rcStrict != VINF_SUCCESS)
8540 return rcStrict;
8541
8542 RTGCPHYS GCPhysSecond;
8543 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8544 fAccess, &GCPhysSecond);
8545 if (rcStrict != VINF_SUCCESS)
8546 return rcStrict;
8547 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8548
8549 PVM pVM = pVCpu->CTX_SUFF(pVM);
8550#ifdef IEM_VERIFICATION_MODE_FULL
8551 /*
8552 * Detect problematic memory when verifying so we can select
8553 * the right execution engine. (TLB: Redo this.)
8554 */
8555 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8556 {
8557 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8558 if (RT_SUCCESS(rc2))
8559 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8560 if (RT_FAILURE(rc2))
8561 pVCpu->iem.s.fProblematicMemory = true;
8562 }
8563#endif
8564
8565
8566 /*
8567 * Read in the current memory content if it's a read, execute or partial
8568 * write access.
8569 */
8570 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8571 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8572 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8573
8574 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8575 {
8576 if (!pVCpu->iem.s.fBypassHandlers)
8577 {
8578 /*
8579 * Must carefully deal with access handler status codes here,
8580 * makes the code a bit bloated.
8581 */
8582 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8583 if (rcStrict == VINF_SUCCESS)
8584 {
8585 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8586 if (rcStrict == VINF_SUCCESS)
8587 { /*likely */ }
8588 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8589 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8590 else
8591 {
8592 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8593 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8594 return rcStrict;
8595 }
8596 }
8597 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8598 {
8599 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8600 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8601 {
8602 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8603 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8604 }
8605 else
8606 {
8607 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8608 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8609 return rcStrict2;
8610 }
8611 }
8612 else
8613 {
8614 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8615 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8616 return rcStrict;
8617 }
8618 }
8619 else
8620 {
8621 /*
8622 * No informational status codes here, much more straightforward.
8623 */
8624 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8625 if (RT_SUCCESS(rc))
8626 {
8627 Assert(rc == VINF_SUCCESS);
8628 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8629 if (RT_SUCCESS(rc))
8630 Assert(rc == VINF_SUCCESS);
8631 else
8632 {
8633 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8634 return rc;
8635 }
8636 }
8637 else
8638 {
8639 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8640 return rc;
8641 }
8642 }
8643
8644#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8645 if ( !pVCpu->iem.s.fNoRem
8646 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8647 {
8648 /*
8649 * Record the reads.
8650 */
8651 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8652 if (pEvtRec)
8653 {
8654 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8655 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8656 pEvtRec->u.RamRead.cb = cbFirstPage;
8657 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8658 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8659 }
8660 pEvtRec = iemVerifyAllocRecord(pVCpu);
8661 if (pEvtRec)
8662 {
8663 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8664 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8665 pEvtRec->u.RamRead.cb = cbSecondPage;
8666 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8667 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8668 }
8669 }
8670#endif
8671 }
8672#ifdef VBOX_STRICT
8673 else
8674 memset(pbBuf, 0xcc, cbMem);
8675 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8676 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8677#endif
8678
8679 /*
8680 * Commit the bounce buffer entry.
8681 */
8682 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8683 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8684 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8685 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8686 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8687 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8688 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8689 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8690 pVCpu->iem.s.cActiveMappings++;
8691
8692 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8693 *ppvMem = pbBuf;
8694 return VINF_SUCCESS;
8695}
8696
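/*
 * Illustrative note (added commentary): for a 4 byte access at page offset
 * 0xffe the split above gives cbFirstPage = 0x1000 - 0xffe = 2 and
 * cbSecondPage = 4 - 2 = 2, i.e. the bounce buffer covers the last two bytes
 * of the first page followed by the first two bytes of the second page.
 */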
8697
8698/**
8699 * iemMemMap worker that deals with iemMemPageMap failures.
8700 */
8701IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8702 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8703{
8704 /*
8705 * Filter out conditions we can handle and the ones which shouldn't happen.
8706 */
8707 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8708 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8709 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8710 {
8711 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8712 return rcMap;
8713 }
8714 pVCpu->iem.s.cPotentialExits++;
8715
8716 /*
8717 * Read in the current memory content if it's a read, execute or partial
8718 * write access.
8719 */
8720 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8721 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8722 {
8723 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8724 memset(pbBuf, 0xff, cbMem);
8725 else
8726 {
8727 int rc;
8728 if (!pVCpu->iem.s.fBypassHandlers)
8729 {
8730 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8731 if (rcStrict == VINF_SUCCESS)
8732 { /* nothing */ }
8733 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8734 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8735 else
8736 {
8737 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8738 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8739 return rcStrict;
8740 }
8741 }
8742 else
8743 {
8744 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8745 if (RT_SUCCESS(rc))
8746 { /* likely */ }
8747 else
8748 {
8749 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8750 GCPhysFirst, rc));
8751 return rc;
8752 }
8753 }
8754 }
8755
8756#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8757 if ( !pVCpu->iem.s.fNoRem
8758 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8759 {
8760 /*
8761 * Record the read.
8762 */
8763 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8764 if (pEvtRec)
8765 {
8766 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8767 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8768 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8769 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8770 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8771 }
8772 }
8773#endif
8774 }
8775#ifdef VBOX_STRICT
8776 else
8777 memset(pbBuf, 0xcc, cbMem);
8780 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8781 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8782#endif
8783
8784 /*
8785 * Commit the bounce buffer entry.
8786 */
8787 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8788 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8789 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8790 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8791 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8792 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8793 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8794 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8795 pVCpu->iem.s.cActiveMappings++;
8796
8797 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8798 *ppvMem = pbBuf;
8799 return VINF_SUCCESS;
8800}
8801
8802
8803
8804/**
8805 * Maps the specified guest memory for the given kind of access.
8806 *
8807 * This may be using bounce buffering of the memory if it's crossing a page
8808 * boundary or if there is an access handler installed for any of it. Because
8809 * of lock prefix guarantees, we're in for some extra clutter when this
8810 * happens.
8811 *
8812 * This may raise a \#GP, \#SS, \#PF or \#AC.
8813 *
8814 * @returns VBox strict status code.
8815 *
8816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8817 * @param ppvMem Where to return the pointer to the mapped
8818 * memory.
8819 * @param cbMem The number of bytes to map. This is usually 1,
8820 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8821 * string operations it can be up to a page.
8822 * @param iSegReg The index of the segment register to use for
8823 * this access. The base and limits are checked.
8824 * Use UINT8_MAX to indicate that no segmentation
8825 * is required (for IDT, GDT and LDT accesses).
8826 * @param GCPtrMem The address of the guest memory.
8827 * @param fAccess How the memory is being accessed. The
8828 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8829 * how to map the memory, while the
8830 * IEM_ACCESS_WHAT_XXX bit is used when raising
8831 * exceptions.
8832 */
8833IEM_STATIC VBOXSTRICTRC
8834iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8835{
8836 /*
8837 * Check the input and figure out which mapping entry to use.
8838 */
8839 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8840 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8841 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8842
8843 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8844 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8845 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8846 {
8847 iMemMap = iemMemMapFindFree(pVCpu);
8848 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8849 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8850 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8851 pVCpu->iem.s.aMemMappings[2].fAccess),
8852 VERR_IEM_IPE_9);
8853 }
8854
8855 /*
8856 * Map the memory, checking that we can actually access it. If something
8857 * slightly complicated happens, fall back on bounce buffering.
8858 */
8859 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8860 if (rcStrict != VINF_SUCCESS)
8861 return rcStrict;
8862
8863 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8864 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8865
8866 RTGCPHYS GCPhysFirst;
8867 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8868 if (rcStrict != VINF_SUCCESS)
8869 return rcStrict;
8870
8871 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8872 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8873 if (fAccess & IEM_ACCESS_TYPE_READ)
8874 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8875
8876 void *pvMem;
8877 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8878 if (rcStrict != VINF_SUCCESS)
8879 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8880
8881 /*
8882 * Fill in the mapping table entry.
8883 */
8884 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8885 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8886 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8887 pVCpu->iem.s.cActiveMappings++;
8888
8889 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8890 *ppvMem = pvMem;
8891 return VINF_SUCCESS;
8892}
8893
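/*
 * Illustrative usage sketch (added commentary; mirrors the fetch helpers
 * further down and assumes the IEM_ACCESS_DATA_RW combination from
 * IEMInternal.h; GCPtrEff and fSomeBits are placeholders):
 *
 *     uint32_t    *pu32Dst;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                       X86_SREG_DS, GCPtrEff, IEM_ACCESS_DATA_RW);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     *pu32Dst |= fSomeBits;
 *     rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_RW);
 *
 * The same fAccess value must be used for both calls so that iemMapLookup can
 * find the mapping entry again.
 */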
8894
8895/**
8896 * Commits the guest memory if bounce buffered and unmaps it.
8897 *
8898 * @returns Strict VBox status code.
8899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8900 * @param pvMem The mapping.
8901 * @param fAccess The kind of access.
8902 */
8903IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8904{
8905 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8906 AssertReturn(iMemMap >= 0, iMemMap);
8907
8908 /* If it's bounce buffered, we may need to write back the buffer. */
8909 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8910 {
8911 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8912 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8913 }
8914 /* Otherwise unlock it. */
8915 else
8916 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8917
8918 /* Free the entry. */
8919 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8920 Assert(pVCpu->iem.s.cActiveMappings != 0);
8921 pVCpu->iem.s.cActiveMappings--;
8922 return VINF_SUCCESS;
8923}
8924
8925#ifdef IEM_WITH_SETJMP
8926
8927/**
8928 * Maps the specified guest memory for the given kind of access, longjmp on
8929 * error.
8930 *
8931 * This may be using bounce buffering of the memory if it's crossing a page
8932 * boundary or if there is an access handler installed for any of it. Because
8933 * of lock prefix guarantees, we're in for some extra clutter when this
8934 * happens.
8935 *
8936 * This may raise a \#GP, \#SS, \#PF or \#AC.
8937 *
8938 * @returns Pointer to the mapped memory.
8939 *
8940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8941 * @param cbMem The number of bytes to map. This is usually 1,
8942 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8943 * string operations it can be up to a page.
8944 * @param iSegReg The index of the segment register to use for
8945 * this access. The base and limits are checked.
8946 * Use UINT8_MAX to indicate that no segmentation
8947 * is required (for IDT, GDT and LDT accesses).
8948 * @param GCPtrMem The address of the guest memory.
8949 * @param fAccess How the memory is being accessed. The
8950 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8951 * how to map the memory, while the
8952 * IEM_ACCESS_WHAT_XXX bit is used when raising
8953 * exceptions.
8954 */
8955IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8956{
8957 /*
8958 * Check the input and figure out which mapping entry to use.
8959 */
8960 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8961 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8962 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8963
8964 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8965 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8966 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8967 {
8968 iMemMap = iemMemMapFindFree(pVCpu);
8969 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8970 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8971 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8972 pVCpu->iem.s.aMemMappings[2].fAccess),
8973 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8974 }
8975
8976 /*
8977 * Map the memory, checking that we can actually access it. If something
8978 * slightly complicated happens, fall back on bounce buffering.
8979 */
8980 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8981 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8982 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8983
8984 /* Crossing a page boundary? */
8985 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8986 { /* No (likely). */ }
8987 else
8988 {
8989 void *pvMem;
8990 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8991 if (rcStrict == VINF_SUCCESS)
8992 return pvMem;
8993 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8994 }
8995
8996 RTGCPHYS GCPhysFirst;
8997 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8998 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8999 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9000
9001 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9002 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9003 if (fAccess & IEM_ACCESS_TYPE_READ)
9004 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9005
9006 void *pvMem;
9007 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9008 if (rcStrict == VINF_SUCCESS)
9009 { /* likely */ }
9010 else
9011 {
9012 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9013 if (rcStrict == VINF_SUCCESS)
9014 return pvMem;
9015 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9016 }
9017
9018 /*
9019 * Fill in the mapping table entry.
9020 */
9021 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9022 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9023 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9024 pVCpu->iem.s.cActiveMappings++;
9025
9026 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9027 return pvMem;
9028}
9029
9030
9031/**
9032 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9033 *
9034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9035 * @param pvMem The mapping.
9036 * @param fAccess The kind of access.
9037 */
9038IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9039{
9040 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9041 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9042
9043 /* If it's bounce buffered, we may need to write back the buffer. */
9044 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9045 {
9046 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9047 {
9048 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9049 if (rcStrict == VINF_SUCCESS)
9050 return;
9051 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9052 }
9053 }
9054 /* Otherwise unlock it. */
9055 else
9056 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9057
9058 /* Free the entry. */
9059 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9060 Assert(pVCpu->iem.s.cActiveMappings != 0);
9061 pVCpu->iem.s.cActiveMappings--;
9062}
9063
9064#endif
9065
9066#ifndef IN_RING3
9067/**
9068 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9069 * buffer part shows trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM).
9070 *
9071 * Allows the instruction to be completed and retired, while the IEM user will
9072 * return to ring-3 immediately afterwards and do the postponed writes there.
9073 *
9074 * @returns VBox status code (no strict statuses). Caller must check
9075 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9077 * @param pvMem The mapping.
9078 * @param fAccess The kind of access.
9079 */
9080IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9081{
9082 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9083 AssertReturn(iMemMap >= 0, iMemMap);
9084
9085 /* If it's bounce buffered, we may need to write back the buffer. */
9086 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9087 {
9088 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9089 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9090 }
9091 /* Otherwise unlock it. */
9092 else
9093 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9094
9095 /* Free the entry. */
9096 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9097 Assert(pVCpu->iem.s.cActiveMappings != 0);
9098 pVCpu->iem.s.cActiveMappings--;
9099 return VINF_SUCCESS;
9100}
9101#endif
9102
9103
9104/**
9105 * Rolls back mappings, releasing page locks and such.
9106 *
9107 * The caller shall only call this after checking cActiveMappings.
9108 *
9110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9111 */
9112IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9113{
9114 Assert(pVCpu->iem.s.cActiveMappings > 0);
9115
9116 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9117 while (iMemMap-- > 0)
9118 {
9119 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9120 if (fAccess != IEM_ACCESS_INVALID)
9121 {
9122 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9123 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9124 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9125 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9126 Assert(pVCpu->iem.s.cActiveMappings > 0);
9127 pVCpu->iem.s.cActiveMappings--;
9128 }
9129 }
9130}
9131
9132
9133/**
9134 * Fetches a data byte.
9135 *
9136 * @returns Strict VBox status code.
9137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9138 * @param pu8Dst Where to return the byte.
9139 * @param iSegReg The index of the segment register to use for
9140 * this access. The base and limits are checked.
9141 * @param GCPtrMem The address of the guest memory.
9142 */
9143IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9144{
9145 /* The lazy approach for now... */
9146 uint8_t const *pu8Src;
9147 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9148 if (rc == VINF_SUCCESS)
9149 {
9150 *pu8Dst = *pu8Src;
9151 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9152 }
9153 return rc;
9154}
9155
9156
9157#ifdef IEM_WITH_SETJMP
9158/**
9159 * Fetches a data byte, longjmp on error.
9160 *
9161 * @returns The byte.
9162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9163 * @param iSegReg The index of the segment register to use for
9164 * this access. The base and limits are checked.
9165 * @param GCPtrMem The address of the guest memory.
9166 */
9167DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9168{
9169 /* The lazy approach for now... */
9170 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9171 uint8_t const bRet = *pu8Src;
9172 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9173 return bRet;
9174}
9175#endif /* IEM_WITH_SETJMP */
9176
9177
9178/**
9179 * Fetches a data word.
9180 *
9181 * @returns Strict VBox status code.
9182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9183 * @param pu16Dst Where to return the word.
9184 * @param iSegReg The index of the segment register to use for
9185 * this access. The base and limits are checked.
9186 * @param GCPtrMem The address of the guest memory.
9187 */
9188IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9189{
9190 /* The lazy approach for now... */
9191 uint16_t const *pu16Src;
9192 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9193 if (rc == VINF_SUCCESS)
9194 {
9195 *pu16Dst = *pu16Src;
9196 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9197 }
9198 return rc;
9199}
9200
9201
9202#ifdef IEM_WITH_SETJMP
9203/**
9204 * Fetches a data word, longjmp on error.
9205 *
9206 * @returns The word
9207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9208 * @param iSegReg The index of the segment register to use for
9209 * this access. The base and limits are checked.
9210 * @param GCPtrMem The address of the guest memory.
9211 */
9212DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9213{
9214 /* The lazy approach for now... */
9215 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9216 uint16_t const u16Ret = *pu16Src;
9217 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9218 return u16Ret;
9219}
9220#endif
9221
9222
9223/**
9224 * Fetches a data dword.
9225 *
9226 * @returns Strict VBox status code.
9227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9228 * @param pu32Dst Where to return the dword.
9229 * @param iSegReg The index of the segment register to use for
9230 * this access. The base and limits are checked.
9231 * @param GCPtrMem The address of the guest memory.
9232 */
9233IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9234{
9235 /* The lazy approach for now... */
9236 uint32_t const *pu32Src;
9237 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9238 if (rc == VINF_SUCCESS)
9239 {
9240 *pu32Dst = *pu32Src;
9241 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9242 }
9243 return rc;
9244}
9245
9246
9247#ifdef IEM_WITH_SETJMP
9248
9249IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9250{
9251 Assert(cbMem >= 1);
9252 Assert(iSegReg < X86_SREG_COUNT);
9253
9254 /*
9255 * 64-bit mode is simpler.
9256 */
9257 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9258 {
9259 if (iSegReg >= X86_SREG_FS)
9260 {
9261 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9262 GCPtrMem += pSel->u64Base;
9263 }
9264
9265 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9266 return GCPtrMem;
9267 }
9268 /*
9269 * 16-bit and 32-bit segmentation.
9270 */
9271 else
9272 {
9273 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9274 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9275 == X86DESCATTR_P /* data, expand up */
9276 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9277 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9278 {
9279 /* expand up */
9280 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9281 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9282 && GCPtrLast32 > (uint32_t)GCPtrMem))
9283 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9284 }
9285 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9286 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9287 {
9288 /* expand down */
9289 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9290 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9291 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9292 && GCPtrLast32 > (uint32_t)GCPtrMem))
9293 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9294 }
9295 else
9296 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9297 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9298 }
9299 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9300}
9301
9302
9303IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9304{
9305 Assert(cbMem >= 1);
9306 Assert(iSegReg < X86_SREG_COUNT);
9307
9308 /*
9309 * 64-bit mode is simpler.
9310 */
9311 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9312 {
9313 if (iSegReg >= X86_SREG_FS)
9314 {
9315 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9316 GCPtrMem += pSel->u64Base;
9317 }
9318
9319 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9320 return GCPtrMem;
9321 }
9322 /*
9323 * 16-bit and 32-bit segmentation.
9324 */
9325 else
9326 {
9327 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9328 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9329 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9330 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9331 {
9332 /* expand up */
9333 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9334 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9335 && GCPtrLast32 > (uint32_t)GCPtrMem))
9336 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9337 }
9338 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9339 {
9340 /* expand down */
9341 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9342 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9343 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9344 && GCPtrLast32 > (uint32_t)GCPtrMem))
9345 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9346 }
9347 else
9348 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9349 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9350 }
9351 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9352}
9353
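/*
 * Illustrative note (added commentary): for a typical present, writable,
 * expand-up data selector the masked attributes above reduce to
 * (X86DESCATTR_P | X86_SEL_TYPE_WRITE), so the expand-up branch is taken;
 * only selectors with X86_SEL_TYPE_DOWN set go through the expand-down limit
 * check.
 */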
9354
9355/**
9356 * Fetches a data dword, longjmp on error, fallback/safe version.
9357 *
9358 * @returns The dword
9359 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9360 * @param iSegReg The index of the segment register to use for
9361 * this access. The base and limits are checked.
9362 * @param GCPtrMem The address of the guest memory.
9363 */
9364IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9365{
9366 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9367 uint32_t const u32Ret = *pu32Src;
9368 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9369 return u32Ret;
9370}
9371
9372
9373/**
9374 * Fetches a data dword, longjmp on error.
9375 *
9376 * @returns The dword
9377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9378 * @param iSegReg The index of the segment register to use for
9379 * this access. The base and limits are checked.
9380 * @param GCPtrMem The address of the guest memory.
9381 */
9382DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9383{
9384# ifdef IEM_WITH_DATA_TLB
9385 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9386 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9387 {
9388 /// @todo more later.
9389 }
9390
9391 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9392# else
9393 /* The lazy approach. */
9394 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9395 uint32_t const u32Ret = *pu32Src;
9396 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9397 return u32Ret;
9398# endif
9399}
9400#endif
9401
9402
9403#ifdef SOME_UNUSED_FUNCTION
9404/**
9405 * Fetches a data dword and sign extends it to a qword.
9406 *
9407 * @returns Strict VBox status code.
9408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9409 * @param pu64Dst Where to return the sign extended value.
9410 * @param iSegReg The index of the segment register to use for
9411 * this access. The base and limits are checked.
9412 * @param GCPtrMem The address of the guest memory.
9413 */
9414IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9415{
9416 /* The lazy approach for now... */
9417 int32_t const *pi32Src;
9418 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9419 if (rc == VINF_SUCCESS)
9420 {
9421 *pu64Dst = *pi32Src;
9422 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9423 }
9424#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9425 else
9426 *pu64Dst = 0;
9427#endif
9428 return rc;
9429}
9430#endif
9431
9432
9433/**
9434 * Fetches a data qword.
9435 *
9436 * @returns Strict VBox status code.
9437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9438 * @param pu64Dst Where to return the qword.
9439 * @param iSegReg The index of the segment register to use for
9440 * this access. The base and limits are checked.
9441 * @param GCPtrMem The address of the guest memory.
9442 */
9443IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9444{
9445 /* The lazy approach for now... */
9446 uint64_t const *pu64Src;
9447 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9448 if (rc == VINF_SUCCESS)
9449 {
9450 *pu64Dst = *pu64Src;
9451 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9452 }
9453 return rc;
9454}
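
/*
 * Illustrative sketch, not part of the original sources: how an instruction
 * emulation might use the lazy fetch helpers above.  It assumes it runs in a
 * context where pVCpu is in scope; X86_SREG_DS and GCPtrEffSrc stand in for
 * the operand produced by the decoder.
 *
 *      uint64_t     u64Value;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &u64Value, X86_SREG_DS, GCPtrEffSrc);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 * The helper maps the guest bytes, copies them and immediately commits and
 * unmaps the mapping, so u64Value is the only thing the caller keeps.
 */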
9455
9456
9457#ifdef IEM_WITH_SETJMP
9458/**
9459 * Fetches a data qword, longjmp on error.
9460 *
9461 * @returns The qword.
9462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9463 * @param iSegReg The index of the segment register to use for
9464 * this access. The base and limits are checked.
9465 * @param GCPtrMem The address of the guest memory.
9466 */
9467DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9468{
9469 /* The lazy approach for now... */
9470 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9471 uint64_t const u64Ret = *pu64Src;
9472 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9473 return u64Ret;
9474}
9475#endif
9476
9477
9478/**
9479 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9480 *
9481 * @returns Strict VBox status code.
9482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9483 * @param pu64Dst Where to return the qword.
9484 * @param iSegReg The index of the segment register to use for
9485 * this access. The base and limits are checked.
9486 * @param GCPtrMem The address of the guest memory.
9487 */
9488IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9489{
9490 /* The lazy approach for now... */
9491 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9492 if (RT_UNLIKELY(GCPtrMem & 15))
9493 return iemRaiseGeneralProtectionFault0(pVCpu);
9494
9495 uint64_t const *pu64Src;
9496 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9497 if (rc == VINF_SUCCESS)
9498 {
9499 *pu64Dst = *pu64Src;
9500 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9501 }
9502 return rc;
9503}
9504
9505
9506#ifdef IEM_WITH_SETJMP
9507/**
9508 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9509 *
9510 * @returns The qword.
9511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9512 * @param iSegReg The index of the segment register to use for
9513 * this access. The base and limits are checked.
9514 * @param GCPtrMem The address of the guest memory.
9515 */
9516DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9517{
9518 /* The lazy approach for now... */
9519 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9520 if (RT_LIKELY(!(GCPtrMem & 15)))
9521 {
9522 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9523 uint64_t const u64Ret = *pu64Src;
9524 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9525 return u64Ret;
9526 }
9527
9528 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9529 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9530}
9531#endif
9532
9533
9534/**
9535 * Fetches a data tword.
9536 *
9537 * @returns Strict VBox status code.
9538 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9539 * @param pr80Dst Where to return the tword.
9540 * @param iSegReg The index of the segment register to use for
9541 * this access. The base and limits are checked.
9542 * @param GCPtrMem The address of the guest memory.
9543 */
9544IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9545{
9546 /* The lazy approach for now... */
9547 PCRTFLOAT80U pr80Src;
9548 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9549 if (rc == VINF_SUCCESS)
9550 {
9551 *pr80Dst = *pr80Src;
9552 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9553 }
9554 return rc;
9555}
9556
9557
9558#ifdef IEM_WITH_SETJMP
9559/**
9560 * Fetches a data tword, longjmp on error.
9561 *
9562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9563 * @param pr80Dst Where to return the tword.
9564 * @param iSegReg The index of the segment register to use for
9565 * this access. The base and limits are checked.
9566 * @param GCPtrMem The address of the guest memory.
9567 */
9568DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9569{
9570 /* The lazy approach for now... */
9571 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9572 *pr80Dst = *pr80Src;
9573 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9574}
9575#endif
9576
9577
9578/**
9579 * Fetches a data dqword (double qword), generally SSE related.
9580 *
9581 * @returns Strict VBox status code.
9582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9583 * @param pu128Dst Where to return the dqword.
9584 * @param iSegReg The index of the segment register to use for
9585 * this access. The base and limits are checked.
9586 * @param GCPtrMem The address of the guest memory.
9587 */
9588IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9589{
9590 /* The lazy approach for now... */
9591 PCRTUINT128U pu128Src;
9592 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9593 if (rc == VINF_SUCCESS)
9594 {
9595 pu128Dst->au64[0] = pu128Src->au64[0];
9596 pu128Dst->au64[1] = pu128Src->au64[1];
9597 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9598 }
9599 return rc;
9600}
9601
9602
9603#ifdef IEM_WITH_SETJMP
9604/**
9605 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9606 *
9607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9608 * @param pu128Dst Where to return the dqword.
9609 * @param iSegReg The index of the segment register to use for
9610 * this access. The base and limits are checked.
9611 * @param GCPtrMem The address of the guest memory.
9612 */
9613IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9614{
9615 /* The lazy approach for now... */
9616 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9617 pu128Dst->au64[0] = pu128Src->au64[0];
9618 pu128Dst->au64[1] = pu128Src->au64[1];
9619 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9620}
9621#endif
9622
9623
9624/**
9625 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9626 * related.
9627 *
9628 * Raises \#GP(0) if not aligned.
9629 *
9630 * @returns Strict VBox status code.
9631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9632 * @param pu128Dst Where to return the dqword.
9633 * @param iSegReg The index of the segment register to use for
9634 * this access. The base and limits are checked.
9635 * @param GCPtrMem The address of the guest memory.
9636 */
9637IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9638{
9639 /* The lazy approach for now... */
9640 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9641 if ( (GCPtrMem & 15)
9642 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9643 return iemRaiseGeneralProtectionFault0(pVCpu);
9644
9645 PCRTUINT128U pu128Src;
9646 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9647 if (rc == VINF_SUCCESS)
9648 {
9649 pu128Dst->au64[0] = pu128Src->au64[0];
9650 pu128Dst->au64[1] = pu128Src->au64[1];
9651 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9652 }
9653 return rc;
9654}
9655
9656
9657#ifdef IEM_WITH_SETJMP
9658/**
9659 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9660 * related, longjmp on error.
9661 *
9662 * Raises \#GP(0) if not aligned.
9663 *
9664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9665 * @param pu128Dst Where to return the dqword.
9666 * @param iSegReg The index of the segment register to use for
9667 * this access. The base and limits are checked.
9668 * @param GCPtrMem The address of the guest memory.
9669 */
9670DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9671{
9672 /* The lazy approach for now... */
9673 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9674 if ( (GCPtrMem & 15) == 0
9675 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9676 {
9677 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9678 pu128Dst->au64[0] = pu128Src->au64[0];
9679 pu128Dst->au64[1] = pu128Src->au64[1];
9680 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9681 return;
9682 }
9683
9684 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9685 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9686}
9687#endif
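
/*
 * Illustrative sketch, not part of the original sources: a MOVAPS style load
 * using the SSE aligned fetcher above.  pVCpu is assumed to be in scope;
 * X86_SREG_DS and GCPtrEffSrc are placeholders for the decoded operand.
 *
 *      RTUINT128U   uSrc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU128AlignedSse(pVCpu, &uSrc, X86_SREG_DS, GCPtrEffSrc);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 * An address that isn't 16 byte aligned yields #GP(0) here unless the guest
 * has set MXCSR.MM (AMD misaligned SSE mode), in which case the access goes
 * through like an unaligned one.
 */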
9688
9689
9690/**
9691 * Fetches a data oword (octo word), generally AVX related.
9692 *
9693 * @returns Strict VBox status code.
9694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9695 * @param pu256Dst Where to return the oword.
9696 * @param iSegReg The index of the segment register to use for
9697 * this access. The base and limits are checked.
9698 * @param GCPtrMem The address of the guest memory.
9699 */
9700IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9701{
9702 /* The lazy approach for now... */
9703 PCRTUINT256U pu256Src;
9704 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9705 if (rc == VINF_SUCCESS)
9706 {
9707 pu256Dst->au64[0] = pu256Src->au64[0];
9708 pu256Dst->au64[1] = pu256Src->au64[1];
9709 pu256Dst->au64[2] = pu256Src->au64[2];
9710 pu256Dst->au64[3] = pu256Src->au64[3];
9711 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9712 }
9713 return rc;
9714}
9715
9716
9717#ifdef IEM_WITH_SETJMP
9718/**
9719 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9720 *
9721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9722 * @param pu256Dst Where to return the oword.
9723 * @param iSegReg The index of the segment register to use for
9724 * this access. The base and limits are checked.
9725 * @param GCPtrMem The address of the guest memory.
9726 */
9727IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9728{
9729 /* The lazy approach for now... */
9730 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9731 pu256Dst->au64[0] = pu256Src->au64[0];
9732 pu256Dst->au64[1] = pu256Src->au64[1];
9733 pu256Dst->au64[2] = pu256Src->au64[2];
9734 pu256Dst->au64[3] = pu256Src->au64[3];
9735 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9736}
9737#endif
9738
9739
9740/**
9741 * Fetches a data oword (octo word) at an aligned address, generally AVX
9742 * related.
9743 *
9744 * Raises \#GP(0) if not aligned.
9745 *
9746 * @returns Strict VBox status code.
9747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9748 * @param pu256Dst Where to return the oword.
9749 * @param iSegReg The index of the segment register to use for
9750 * this access. The base and limits are checked.
9751 * @param GCPtrMem The address of the guest memory.
9752 */
9753IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9754{
9755 /* The lazy approach for now... */
9756 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9757 if (GCPtrMem & 31)
9758 return iemRaiseGeneralProtectionFault0(pVCpu);
9759
9760 PCRTUINT256U pu256Src;
9761 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9762 if (rc == VINF_SUCCESS)
9763 {
9764 pu256Dst->au64[0] = pu256Src->au64[0];
9765 pu256Dst->au64[1] = pu256Src->au64[1];
9766 pu256Dst->au64[2] = pu256Src->au64[2];
9767 pu256Dst->au64[3] = pu256Src->au64[3];
9768 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9769 }
9770 return rc;
9771}
9772
9773
9774#ifdef IEM_WITH_SETJMP
9775/**
9776 * Fetches a data oword (octo word) at an aligned address, generally AVX
9777 * related, longjmp on error.
9778 *
9779 * Raises \#GP(0) if not aligned.
9780 *
9781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9782 * @param pu256Dst Where to return the oword.
9783 * @param iSegReg The index of the segment register to use for
9784 * this access. The base and limits are checked.
9785 * @param GCPtrMem The address of the guest memory.
9786 */
9787DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9788{
9789 /* The lazy approach for now... */
9790 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9791 if ((GCPtrMem & 31) == 0)
9792 {
9793 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9794 pu256Dst->au64[0] = pu256Src->au64[0];
9795 pu256Dst->au64[1] = pu256Src->au64[1];
9796 pu256Dst->au64[2] = pu256Src->au64[2];
9797 pu256Dst->au64[3] = pu256Src->au64[3];
9798 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9799 return;
9800 }
9801
9802 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9803 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9804}
9805#endif
9806
9807
9808
9809/**
9810 * Fetches a descriptor register (lgdt, lidt).
9811 *
9812 * @returns Strict VBox status code.
9813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9814 * @param pcbLimit Where to return the limit.
9815 * @param pGCPtrBase Where to return the base.
9816 * @param iSegReg The index of the segment register to use for
9817 * this access. The base and limits are checked.
9818 * @param GCPtrMem The address of the guest memory.
9819 * @param enmOpSize The effective operand size.
9820 */
9821IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9822 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9823{
9824 /*
9825 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9826 * little special:
9827 * - The two reads are done separately.
9828 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9829 * - We suspect the 386 to actually commit the limit before the base in
9830 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9831 * don't try to emulate this eccentric behavior, because it's not well
9832 * enough understood and rather hard to trigger.
9833 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9834 */
9835 VBOXSTRICTRC rcStrict;
9836 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9837 {
9838 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9839 if (rcStrict == VINF_SUCCESS)
9840 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9841 }
9842 else
9843 {
9844 uint32_t uTmp = 0; /* (Silences a Visual C++ 'potentially uninitialized' warning.) */
9845 if (enmOpSize == IEMMODE_32BIT)
9846 {
9847 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9848 {
9849 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9850 if (rcStrict == VINF_SUCCESS)
9851 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9852 }
9853 else
9854 {
9855 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9856 if (rcStrict == VINF_SUCCESS)
9857 {
9858 *pcbLimit = (uint16_t)uTmp;
9859 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9860 }
9861 }
9862 if (rcStrict == VINF_SUCCESS)
9863 *pGCPtrBase = uTmp;
9864 }
9865 else
9866 {
9867 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9868 if (rcStrict == VINF_SUCCESS)
9869 {
9870 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9871 if (rcStrict == VINF_SUCCESS)
9872 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9873 }
9874 }
9875 }
9876 return rcStrict;
9877}
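
/*
 * Illustrative sketch, not part of the original sources: the memory operand
 * read above is the usual pseudo-descriptor, a 16-bit limit followed at
 * offset 2 by the base address.  Assuming pVCpu is in scope and GCPtrEffSrc
 * and enmEffOpSize come from the decoder:
 *
 *      uint16_t     cbLimit;
 *      RTGCPTR      GCPtrBase;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase,
 *                                                  X86_SREG_DS, GCPtrEffSrc, enmEffOpSize);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 * With a 16-bit operand size only bits 0..23 of the base are returned, with a
 * 32-bit operand size bits 0..31, and in 64-bit mode the full 64-bit base.
 */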
9878
9879
9880
9881/**
9882 * Stores a data byte.
9883 *
9884 * @returns Strict VBox status code.
9885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9886 * @param iSegReg The index of the segment register to use for
9887 * this access. The base and limits are checked.
9888 * @param GCPtrMem The address of the guest memory.
9889 * @param u8Value The value to store.
9890 */
9891IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9892{
9893 /* The lazy approach for now... */
9894 uint8_t *pu8Dst;
9895 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9896 if (rc == VINF_SUCCESS)
9897 {
9898 *pu8Dst = u8Value;
9899 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9900 }
9901 return rc;
9902}
9903
9904
9905#ifdef IEM_WITH_SETJMP
9906/**
9907 * Stores a data byte, longjmp on error.
9908 *
9909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9910 * @param iSegReg The index of the segment register to use for
9911 * this access. The base and limits are checked.
9912 * @param GCPtrMem The address of the guest memory.
9913 * @param u8Value The value to store.
9914 */
9915IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9916{
9917 /* The lazy approach for now... */
9918 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9919 *pu8Dst = u8Value;
9920 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9921}
9922#endif
9923
9924
9925/**
9926 * Stores a data word.
9927 *
9928 * @returns Strict VBox status code.
9929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9930 * @param iSegReg The index of the segment register to use for
9931 * this access. The base and limits are checked.
9932 * @param GCPtrMem The address of the guest memory.
9933 * @param u16Value The value to store.
9934 */
9935IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9936{
9937 /* The lazy approach for now... */
9938 uint16_t *pu16Dst;
9939 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9940 if (rc == VINF_SUCCESS)
9941 {
9942 *pu16Dst = u16Value;
9943 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9944 }
9945 return rc;
9946}
9947
9948
9949#ifdef IEM_WITH_SETJMP
9950/**
9951 * Stores a data word, longjmp on error.
9952 *
9953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9954 * @param iSegReg The index of the segment register to use for
9955 * this access. The base and limits are checked.
9956 * @param GCPtrMem The address of the guest memory.
9957 * @param u16Value The value to store.
9958 */
9959IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9960{
9961 /* The lazy approach for now... */
9962 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9963 *pu16Dst = u16Value;
9964 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9965}
9966#endif
9967
9968
9969/**
9970 * Stores a data dword.
9971 *
9972 * @returns Strict VBox status code.
9973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9974 * @param iSegReg The index of the segment register to use for
9975 * this access. The base and limits are checked.
9976 * @param GCPtrMem The address of the guest memory.
9977 * @param u32Value The value to store.
9978 */
9979IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9980{
9981 /* The lazy approach for now... */
9982 uint32_t *pu32Dst;
9983 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9984 if (rc == VINF_SUCCESS)
9985 {
9986 *pu32Dst = u32Value;
9987 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9988 }
9989 return rc;
9990}
9991
9992
9993#ifdef IEM_WITH_SETJMP
9994/**
9995 * Stores a data dword, longjmp on error.
9996 *
9998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9999 * @param iSegReg The index of the segment register to use for
10000 * this access. The base and limits are checked.
10001 * @param GCPtrMem The address of the guest memory.
10002 * @param u32Value The value to store.
10003 */
10004IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10005{
10006 /* The lazy approach for now... */
10007 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10008 *pu32Dst = u32Value;
10009 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10010}
10011#endif
10012
10013
10014/**
10015 * Stores a data qword.
10016 *
10017 * @returns Strict VBox status code.
10018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10019 * @param iSegReg The index of the segment register to use for
10020 * this access. The base and limits are checked.
10021 * @param GCPtrMem The address of the guest memory.
10022 * @param u64Value The value to store.
10023 */
10024IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10025{
10026 /* The lazy approach for now... */
10027 uint64_t *pu64Dst;
10028 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10029 if (rc == VINF_SUCCESS)
10030 {
10031 *pu64Dst = u64Value;
10032 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10033 }
10034 return rc;
10035}
10036
10037
10038#ifdef IEM_WITH_SETJMP
10039/**
10040 * Stores a data qword, longjmp on error.
10041 *
10042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10043 * @param iSegReg The index of the segment register to use for
10044 * this access. The base and limits are checked.
10045 * @param GCPtrMem The address of the guest memory.
10046 * @param u64Value The value to store.
10047 */
10048IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10049{
10050 /* The lazy approach for now... */
10051 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10052 *pu64Dst = u64Value;
10053 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10054}
10055#endif
10056
10057
10058/**
10059 * Stores a data dqword.
10060 *
10061 * @returns Strict VBox status code.
10062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10063 * @param iSegReg The index of the segment register to use for
10064 * this access. The base and limits are checked.
10065 * @param GCPtrMem The address of the guest memory.
10066 * @param u128Value The value to store.
10067 */
10068IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10069{
10070 /* The lazy approach for now... */
10071 PRTUINT128U pu128Dst;
10072 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10073 if (rc == VINF_SUCCESS)
10074 {
10075 pu128Dst->au64[0] = u128Value.au64[0];
10076 pu128Dst->au64[1] = u128Value.au64[1];
10077 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10078 }
10079 return rc;
10080}
10081
10082
10083#ifdef IEM_WITH_SETJMP
10084/**
10085 * Stores a data dqword, longjmp on error.
10086 *
10087 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10088 * @param iSegReg The index of the segment register to use for
10089 * this access. The base and limits are checked.
10090 * @param GCPtrMem The address of the guest memory.
10091 * @param u128Value The value to store.
10092 */
10093IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10094{
10095 /* The lazy approach for now... */
10096 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10097 pu128Dst->au64[0] = u128Value.au64[0];
10098 pu128Dst->au64[1] = u128Value.au64[1];
10099 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10100}
10101#endif
10102
10103
10104/**
10105 * Stores a data dqword, SSE aligned.
10106 *
10107 * @returns Strict VBox status code.
10108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10109 * @param iSegReg The index of the segment register to use for
10110 * this access. The base and limits are checked.
10111 * @param GCPtrMem The address of the guest memory.
10112 * @param u128Value The value to store.
10113 */
10114IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10115{
10116 /* The lazy approach for now... */
10117 if ( (GCPtrMem & 15)
10118 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10119 return iemRaiseGeneralProtectionFault0(pVCpu);
10120
10121 PRTUINT128U pu128Dst;
10122 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10123 if (rc == VINF_SUCCESS)
10124 {
10125 pu128Dst->au64[0] = u128Value.au64[0];
10126 pu128Dst->au64[1] = u128Value.au64[1];
10127 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10128 }
10129 return rc;
10130}
10131
10132
10133#ifdef IEM_WITH_SETJMP
10134/**
10135 * Stores a data dqword, SSE aligned, longjmp on error.
10136 *
10138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10139 * @param iSegReg The index of the segment register to use for
10140 * this access. The base and limits are checked.
10141 * @param GCPtrMem The address of the guest memory.
10142 * @param u128Value The value to store.
10143 */
10144DECL_NO_INLINE(IEM_STATIC, void)
10145iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10146{
10147 /* The lazy approach for now... */
10148 if ( (GCPtrMem & 15) == 0
10149 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10150 {
10151 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10152 pu128Dst->au64[0] = u128Value.au64[0];
10153 pu128Dst->au64[1] = u128Value.au64[1];
10154 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10155 return;
10156 }
10157
10158 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10159 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10160}
10161#endif
10162
10163
10164/**
10165 * Stores a data oword (octo word).
10166 *
10167 * @returns Strict VBox status code.
10168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10169 * @param iSegReg The index of the segment register to use for
10170 * this access. The base and limits are checked.
10171 * @param GCPtrMem The address of the guest memory.
10172 * @param pu256Value Pointer to the value to store.
10173 */
10174IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10175{
10176 /* The lazy approach for now... */
10177 PRTUINT256U pu256Dst;
10178 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10179 if (rc == VINF_SUCCESS)
10180 {
10181 pu256Dst->au64[0] = pu256Value->au64[0];
10182 pu256Dst->au64[1] = pu256Value->au64[1];
10183 pu256Dst->au64[2] = pu256Value->au64[2];
10184 pu256Dst->au64[3] = pu256Value->au64[3];
10185 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10186 }
10187 return rc;
10188}
10189
10190
10191#ifdef IEM_WITH_SETJMP
10192/**
10193 * Stores a data oword (octo word), longjmp on error.
10194 *
10195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10196 * @param iSegReg The index of the segment register to use for
10197 * this access. The base and limits are checked.
10198 * @param GCPtrMem The address of the guest memory.
10199 * @param pu256Value Pointer to the value to store.
10200 */
10201IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10202{
10203 /* The lazy approach for now... */
10204 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10205 pu256Dst->au64[0] = pu256Value->au64[0];
10206 pu256Dst->au64[1] = pu256Value->au64[1];
10207 pu256Dst->au64[2] = pu256Value->au64[2];
10208 pu256Dst->au64[3] = pu256Value->au64[3];
10209 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10210}
10211#endif
10212
10213
10214/**
10215 * Stores a data oword (octo word), AVX aligned.
10216 *
10217 * @returns Strict VBox status code.
10218 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10219 * @param iSegReg The index of the segment register to use for
10220 * this access. The base and limits are checked.
10221 * @param GCPtrMem The address of the guest memory.
10222 * @param pu256Value Pointer to the value to store.
10223 */
10224IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10225{
10226 /* The lazy approach for now... */
10227 if (GCPtrMem & 31)
10228 return iemRaiseGeneralProtectionFault0(pVCpu);
10229
10230 PRTUINT256U pu256Dst;
10231 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10232 if (rc == VINF_SUCCESS)
10233 {
10234 pu256Dst->au64[0] = pu256Value->au64[0];
10235 pu256Dst->au64[1] = pu256Value->au64[1];
10236 pu256Dst->au64[2] = pu256Value->au64[2];
10237 pu256Dst->au64[3] = pu256Value->au64[3];
10238 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10239 }
10240 return rc;
10241}
10242
10243
10244#ifdef IEM_WITH_SETJMP
10245/**
10246 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10247 *
10249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10250 * @param iSegReg The index of the segment register to use for
10251 * this access. The base and limits are checked.
10252 * @param GCPtrMem The address of the guest memory.
10253 * @param pu256Value Pointer to the value to store.
10254 */
10255DECL_NO_INLINE(IEM_STATIC, void)
10256iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10257{
10258 /* The lazy approach for now... */
10259 if ((GCPtrMem & 31) == 0)
10260 {
10261 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10262 pu256Dst->au64[0] = pu256Value->au64[0];
10263 pu256Dst->au64[1] = pu256Value->au64[1];
10264 pu256Dst->au64[2] = pu256Value->au64[2];
10265 pu256Dst->au64[3] = pu256Value->au64[3];
10266 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10267 return;
10268 }
10269
10270 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10271 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10272}
10273#endif
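
/*
 * Illustrative sketch, not part of the original sources: a VMOVAPS style
 * 256-bit store using the AVX aligned helper above.  pVCpu, uYmmSrc and
 * GCPtrEffDst are assumed to be in scope.  Note that, unlike the SSE paths,
 * there is no MXCSR.MM escape hatch here: any address that isn't 32 byte
 * aligned raises #GP(0).
 *
 *      VBOXSTRICTRC rcStrict = iemMemStoreDataU256AlignedAvx(pVCpu, X86_SREG_DS, GCPtrEffDst, &uYmmSrc);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */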
10274
10275
10276/**
10277 * Stores a descriptor register (sgdt, sidt).
10278 *
10279 * @returns Strict VBox status code.
10280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10281 * @param cbLimit The limit.
10282 * @param GCPtrBase The base address.
10283 * @param iSegReg The index of the segment register to use for
10284 * this access. The base and limits are checked.
10285 * @param GCPtrMem The address of the guest memory.
10286 */
10287IEM_STATIC VBOXSTRICTRC
10288iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10289{
10290
10291 /*
10292 * The SIDT and SGDT instructions actually stores the data using two
10293 * independent writes. The instructions does not respond to opsize prefixes.
10294 */
10295 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10296 if (rcStrict == VINF_SUCCESS)
10297 {
10298 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10299 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10300 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10301 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10302 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10303 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10304 else
10305 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10306 }
10307 return rcStrict;
10308}
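
/*
 * Illustrative worked example, not from the original sources and with made up
 * values: storing a descriptor register with cbLimit=0x03ff and
 * GCPtrBase=0x00fc1000 from 16-bit code writes the limit word 0x03ff at
 * GCPtrMem and a dword at GCPtrMem+2.  On a 286-or-older target CPU that
 * dword is 0xfffc1000 (top byte forced to 0xff), on later targets it is
 * 0x00fc1000.  From 32-bit and 64-bit code the full base is stored as a
 * dword or a qword respectively.
 */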
10309
10310
10311/**
10312 * Pushes a word onto the stack.
10313 *
10314 * @returns Strict VBox status code.
10315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10316 * @param u16Value The value to push.
10317 */
10318IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10319{
10320 /* Decrement the stack pointer. */
10321 uint64_t uNewRsp;
10322 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10323 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10324
10325 /* Write the word the lazy way. */
10326 uint16_t *pu16Dst;
10327 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10328 if (rc == VINF_SUCCESS)
10329 {
10330 *pu16Dst = u16Value;
10331 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10332 }
10333
10334 /* Commit the new RSP value unless an access handler made trouble. */
10335 if (rc == VINF_SUCCESS)
10336 pCtx->rsp = uNewRsp;
10337
10338 return rc;
10339}
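
/*
 * Illustrative sketch, not part of the original sources: a PUSH r16 style
 * emulation would simply do
 *
 *      VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, u16Src);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 * where u16Src is the decoded source operand.  Because the helper only
 * commits the new RSP after the write succeeded, a #SS or #PF raised by the
 * store leaves the guest RSP unchanged.
 */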
10340
10341
10342/**
10343 * Pushes a dword onto the stack.
10344 *
10345 * @returns Strict VBox status code.
10346 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10347 * @param u32Value The value to push.
10348 */
10349IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10350{
10351 /* Decrement the stack pointer. */
10352 uint64_t uNewRsp;
10353 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10354 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10355
10356 /* Write the dword the lazy way. */
10357 uint32_t *pu32Dst;
10358 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10359 if (rc == VINF_SUCCESS)
10360 {
10361 *pu32Dst = u32Value;
10362 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10363 }
10364
10365 /* Commit the new RSP value unless an access handler made trouble. */
10366 if (rc == VINF_SUCCESS)
10367 pCtx->rsp = uNewRsp;
10368
10369 return rc;
10370}
10371
10372
10373/**
10374 * Pushes a dword segment register value onto the stack.
10375 *
10376 * @returns Strict VBox status code.
10377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10378 * @param u32Value The value to push.
10379 */
10380IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10381{
10382 /* Decrement the stack pointer. */
10383 uint64_t uNewRsp;
10384 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10385 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10386
10387 VBOXSTRICTRC rc;
10388 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10389 {
10390 /* The recompiler writes a full dword. */
10391 uint32_t *pu32Dst;
10392 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10393 if (rc == VINF_SUCCESS)
10394 {
10395 *pu32Dst = u32Value;
10396 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10397 }
10398 }
10399 else
10400 {
10401 /* The intel docs talk about zero extending the selector register
10402 value. My actual intel CPU here might be zero extending the value
10403 but it still only writes the lower word... */
10404 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10405 * happens when crossing an electric page boundary, is the high word checked
10406 * for write accessibility or not? Probably it is. What about segment limits?
10407 * It appears this behavior is also shared with trap error codes.
10408 *
10409 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10410 * ancient hardware when it actually did change. */
10411 uint16_t *pu16Dst;
10412 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10413 if (rc == VINF_SUCCESS)
10414 {
10415 *pu16Dst = (uint16_t)u32Value;
10416 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10417 }
10418 }
10419
10420 /* Commit the new RSP value unless an access handler made trouble. */
10421 if (rc == VINF_SUCCESS)
10422 pCtx->rsp = uNewRsp;
10423
10424 return rc;
10425}
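
/*
 * Illustrative worked example, not from the original sources and with made up
 * values: with ESP=0x1000 and the dword at SS:0x0ffc currently 0xddccbbaa, a
 * 32-bit "push fs" with FS=0x0033 takes the non-recompiler path above and
 * leaves 0xddcc0033 at SS:0x0ffc; only the low word of the 4 byte stack slot
 * is actually written, mirroring what real Intel CPUs were observed to do.
 */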
10426
10427
10428/**
10429 * Pushes a qword onto the stack.
10430 *
10431 * @returns Strict VBox status code.
10432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10433 * @param u64Value The value to push.
10434 */
10435IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10436{
10437 /* Decrement the stack pointer. */
10438 uint64_t uNewRsp;
10439 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10440 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10441
10442 /* Write the qword the lazy way. */
10443 uint64_t *pu64Dst;
10444 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10445 if (rc == VINF_SUCCESS)
10446 {
10447 *pu64Dst = u64Value;
10448 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10449 }
10450
10451 /* Commit the new RSP value unless an access handler made trouble. */
10452 if (rc == VINF_SUCCESS)
10453 pCtx->rsp = uNewRsp;
10454
10455 return rc;
10456}
10457
10458
10459/**
10460 * Pops a word from the stack.
10461 *
10462 * @returns Strict VBox status code.
10463 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10464 * @param pu16Value Where to store the popped value.
10465 */
10466IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10467{
10468 /* Increment the stack pointer. */
10469 uint64_t uNewRsp;
10470 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10471 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10472
10473 /* Read the word the lazy way. */
10474 uint16_t const *pu16Src;
10475 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10476 if (rc == VINF_SUCCESS)
10477 {
10478 *pu16Value = *pu16Src;
10479 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10480
10481 /* Commit the new RSP value. */
10482 if (rc == VINF_SUCCESS)
10483 pCtx->rsp = uNewRsp;
10484 }
10485
10486 return rc;
10487}
10488
10489
10490/**
10491 * Pops a dword from the stack.
10492 *
10493 * @returns Strict VBox status code.
10494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10495 * @param pu32Value Where to store the popped value.
10496 */
10497IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10498{
10499 /* Increment the stack pointer. */
10500 uint64_t uNewRsp;
10501 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10502 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10503
10504 /* Read the dword the lazy way. */
10505 uint32_t const *pu32Src;
10506 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10507 if (rc == VINF_SUCCESS)
10508 {
10509 *pu32Value = *pu32Src;
10510 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10511
10512 /* Commit the new RSP value. */
10513 if (rc == VINF_SUCCESS)
10514 pCtx->rsp = uNewRsp;
10515 }
10516
10517 return rc;
10518}
10519
10520
10521/**
10522 * Pops a qword from the stack.
10523 *
10524 * @returns Strict VBox status code.
10525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10526 * @param pu64Value Where to store the popped value.
10527 */
10528IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10529{
10530 /* Increment the stack pointer. */
10531 uint64_t uNewRsp;
10532 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10533 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10534
10535 /* Read the qword the lazy way. */
10536 uint64_t const *pu64Src;
10537 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10538 if (rc == VINF_SUCCESS)
10539 {
10540 *pu64Value = *pu64Src;
10541 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10542
10543 /* Commit the new RSP value. */
10544 if (rc == VINF_SUCCESS)
10545 pCtx->rsp = uNewRsp;
10546 }
10547
10548 return rc;
10549}
10550
10551
10552/**
10553 * Pushes a word onto the stack, using a temporary stack pointer.
10554 *
10555 * @returns Strict VBox status code.
10556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10557 * @param u16Value The value to push.
10558 * @param pTmpRsp Pointer to the temporary stack pointer.
10559 */
10560IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10561{
10562 /* Decrement the stack pointer. */
10563 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10564 RTUINT64U NewRsp = *pTmpRsp;
10565 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10566
10567 /* Write the word the lazy way. */
10568 uint16_t *pu16Dst;
10569 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10570 if (rc == VINF_SUCCESS)
10571 {
10572 *pu16Dst = u16Value;
10573 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10574 }
10575
10576 /* Commit the new RSP value unless an access handler made trouble. */
10577 if (rc == VINF_SUCCESS)
10578 *pTmpRsp = NewRsp;
10579
10580 return rc;
10581}
10582
10583
10584/**
10585 * Pushes a dword onto the stack, using a temporary stack pointer.
10586 *
10587 * @returns Strict VBox status code.
10588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10589 * @param u32Value The value to push.
10590 * @param pTmpRsp Pointer to the temporary stack pointer.
10591 */
10592IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10593{
10594 /* Decrement the stack pointer. */
10595 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10596 RTUINT64U NewRsp = *pTmpRsp;
10597 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10598
10599 /* Write the dword the lazy way. */
10600 uint32_t *pu32Dst;
10601 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10602 if (rc == VINF_SUCCESS)
10603 {
10604 *pu32Dst = u32Value;
10605 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10606 }
10607
10608 /* Commit the new RSP value unless an access handler made trouble. */
10609 if (rc == VINF_SUCCESS)
10610 *pTmpRsp = NewRsp;
10611
10612 return rc;
10613}
10614
10615
10616/**
10617 * Pushes a qword onto the stack, using a temporary stack pointer.
10618 *
10619 * @returns Strict VBox status code.
10620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10621 * @param u64Value The value to push.
10622 * @param pTmpRsp Pointer to the temporary stack pointer.
10623 */
10624IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10625{
10626 /* Decrement the stack pointer. */
10627 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10628 RTUINT64U NewRsp = *pTmpRsp;
10629 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10630
10631 /* Write the qword the lazy way. */
10632 uint64_t *pu64Dst;
10633 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10634 if (rc == VINF_SUCCESS)
10635 {
10636 *pu64Dst = u64Value;
10637 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10638 }
10639
10640 /* Commit the new RSP value unless an access handler made trouble. */
10641 if (rc == VINF_SUCCESS)
10642 *pTmpRsp = NewRsp;
10643
10644 return rc;
10645}
10646
10647
10648/**
10649 * Pops a word from the stack, using a temporary stack pointer.
10650 *
10651 * @returns Strict VBox status code.
10652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10653 * @param pu16Value Where to store the popped value.
10654 * @param pTmpRsp Pointer to the temporary stack pointer.
10655 */
10656IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10657{
10658 /* Increment the stack pointer. */
10659 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10660 RTUINT64U NewRsp = *pTmpRsp;
10661 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10662
10663 /* Read the word the lazy way. */
10664 uint16_t const *pu16Src;
10665 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10666 if (rc == VINF_SUCCESS)
10667 {
10668 *pu16Value = *pu16Src;
10669 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10670
10671 /* Commit the new RSP value. */
10672 if (rc == VINF_SUCCESS)
10673 *pTmpRsp = NewRsp;
10674 }
10675
10676 return rc;
10677}
10678
10679
10680/**
10681 * Pops a dword from the stack, using a temporary stack pointer.
10682 *
10683 * @returns Strict VBox status code.
10684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10685 * @param pu32Value Where to store the popped value.
10686 * @param pTmpRsp Pointer to the temporary stack pointer.
10687 */
10688IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10689{
10690 /* Increment the stack pointer. */
10691 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10692 RTUINT64U NewRsp = *pTmpRsp;
10693 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10694
10695 /* Read the dword the lazy way. */
10696 uint32_t const *pu32Src;
10697 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10698 if (rc == VINF_SUCCESS)
10699 {
10700 *pu32Value = *pu32Src;
10701 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10702
10703 /* Commit the new RSP value. */
10704 if (rc == VINF_SUCCESS)
10705 *pTmpRsp = NewRsp;
10706 }
10707
10708 return rc;
10709}
10710
10711
10712/**
10713 * Pops a qword from the stack, using a temporary stack pointer.
10714 *
10715 * @returns Strict VBox status code.
10716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10717 * @param pu64Value Where to store the popped value.
10718 * @param pTmpRsp Pointer to the temporary stack pointer.
10719 */
10720IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10721{
10722 /* Increment the stack pointer. */
10723 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10724 RTUINT64U NewRsp = *pTmpRsp;
10725 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10726
10727 /* Read the qword the lazy way. */
10728 uint64_t const *pu64Src;
10729 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10730 if (rcStrict == VINF_SUCCESS)
10731 {
10732 *pu64Value = *pu64Src;
10733 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10734
10735 /* Commit the new RSP value. */
10736 if (rcStrict == VINF_SUCCESS)
10737 *pTmpRsp = NewRsp;
10738 }
10739
10740 return rcStrict;
10741}
10742
10743
10744/**
10745 * Begin a special stack push (used by interrupts, exceptions and such).
10746 *
10747 * This will raise \#SS or \#PF if appropriate.
10748 *
10749 * @returns Strict VBox status code.
10750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10751 * @param cbMem The number of bytes to push onto the stack.
10752 * @param ppvMem Where to return the pointer to the stack memory.
10753 * As with the other memory functions this could be
10754 * direct access or bounce buffered access, so
10755 * don't commit register until the commit call
10756 * succeeds.
10757 * @param puNewRsp Where to return the new RSP value. This must be
10758 * passed unchanged to
10759 * iemMemStackPushCommitSpecial().
10760 */
10761IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10762{
10763 Assert(cbMem < UINT8_MAX);
10764 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10765 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10766 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10767}
10768
10769
10770/**
10771 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10772 *
10773 * This will update the rSP.
10774 *
10775 * @returns Strict VBox status code.
10776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10777 * @param pvMem The pointer returned by
10778 * iemMemStackPushBeginSpecial().
10779 * @param uNewRsp The new RSP value returned by
10780 * iemMemStackPushBeginSpecial().
10781 */
10782IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10783{
10784 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10785 if (rcStrict == VINF_SUCCESS)
10786 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10787 return rcStrict;
10788}
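
/*
 * Illustrative sketch, not part of the original sources: the begin/commit
 * pair above is used when several values must land on the stack as one unit,
 * e.g. a protected mode exception frame.  pVCpu is assumed to be in scope and
 * uEip, uCs and uEfl are made up values to push:
 *
 *      uint64_t      uNewRsp;
 *      uint32_t     *pau32Frame;
 *      VBOXSTRICTRC  rcStrict = iemMemStackPushBeginSpecial(pVCpu, 3 * sizeof(uint32_t),
 *                                                           (void **)&pau32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pau32Frame[0] = uEip;    (lowest address)
 *      pau32Frame[1] = uCs;
 *      pau32Frame[2] = uEfl;    (highest address)
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pau32Frame, uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 * RSP is only updated by the commit call, and only if the buffered write went
 * through.
 */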
10789
10790
10791/**
10792 * Begin a special stack pop (used by iret, retf and such).
10793 *
10794 * This will raise \#SS or \#PF if appropriate.
10795 *
10796 * @returns Strict VBox status code.
10797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10798 * @param cbMem The number of bytes to pop from the stack.
10799 * @param ppvMem Where to return the pointer to the stack memory.
10800 * @param puNewRsp Where to return the new RSP value. This must be
10801 * assigned to CPUMCTX::rsp manually some time
10802 * after iemMemStackPopDoneSpecial() has been
10803 * called.
10804 */
10805IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10806{
10807 Assert(cbMem < UINT8_MAX);
10808 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10809 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10810 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10811}
10812
10813
10814/**
10815 * Continue a special stack pop (used by iret and retf).
10816 *
10817 * This will raise \#SS or \#PF if appropriate.
10818 *
10819 * @returns Strict VBox status code.
10820 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10821 * @param cbMem The number of bytes to pop from the stack.
10822 * @param ppvMem Where to return the pointer to the stack memory.
10823 * @param puNewRsp Where to return the new RSP value. This must be
10824 * assigned to CPUMCTX::rsp manually some time
10825 * after iemMemStackPopDoneSpecial() has been
10826 * called.
10827 */
10828IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10829{
10830 Assert(cbMem < UINT8_MAX);
10831 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10832 RTUINT64U NewRsp;
10833 NewRsp.u = *puNewRsp;
10834 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10835 *puNewRsp = NewRsp.u;
10836 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10837}
10838
10839
10840/**
10841 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10842 * iemMemStackPopContinueSpecial).
10843 *
10844 * The caller will manually commit the rSP.
10845 *
10846 * @returns Strict VBox status code.
10847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10848 * @param pvMem The pointer returned by
10849 * iemMemStackPopBeginSpecial() or
10850 * iemMemStackPopContinueSpecial().
10851 */
10852IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10853{
10854 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10855}
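
/*
 * Illustrative sketch, not part of the original sources: a simplified IRET
 * style sequence using the begin/done pair above.  pVCpu is assumed to be in
 * scope and validation of the popped values is omitted.
 *
 *      uint64_t        uNewRsp;
 *      uint32_t const *pau32Frame;
 *      VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 3 * sizeof(uint32_t),
 *                                                            (void const **)&pau32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint32_t const uNewEip = pau32Frame[0];
 *      uint32_t const uNewCs  = pau32Frame[1];
 *      uint32_t const uNewEfl = pau32Frame[2];
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau32Frame);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
 *
 * Unlike the simple pop helpers, RSP is not touched by the Begin/Done calls;
 * the caller assigns the returned value to rsp itself once the popped values
 * have been checked.
 */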
10856
10857
10858/**
10859 * Fetches a system table byte.
10860 *
10861 * @returns Strict VBox status code.
10862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10863 * @param pbDst Where to return the byte.
10864 * @param iSegReg The index of the segment register to use for
10865 * this access. The base and limits are checked.
10866 * @param GCPtrMem The address of the guest memory.
10867 */
10868IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10869{
10870 /* The lazy approach for now... */
10871 uint8_t const *pbSrc;
10872 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10873 if (rc == VINF_SUCCESS)
10874 {
10875 *pbDst = *pbSrc;
10876 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10877 }
10878 return rc;
10879}
10880
10881
10882/**
10883 * Fetches a system table word.
10884 *
10885 * @returns Strict VBox status code.
10886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10887 * @param pu16Dst Where to return the word.
10888 * @param iSegReg The index of the segment register to use for
10889 * this access. The base and limits are checked.
10890 * @param GCPtrMem The address of the guest memory.
10891 */
10892IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10893{
10894 /* The lazy approach for now... */
10895 uint16_t const *pu16Src;
10896 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10897 if (rc == VINF_SUCCESS)
10898 {
10899 *pu16Dst = *pu16Src;
10900 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10901 }
10902 return rc;
10903}
10904
10905
10906/**
10907 * Fetches a system table dword.
10908 *
10909 * @returns Strict VBox status code.
10910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10911 * @param pu32Dst Where to return the dword.
10912 * @param iSegReg The index of the segment register to use for
10913 * this access. The base and limits are checked.
10914 * @param GCPtrMem The address of the guest memory.
10915 */
10916IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10917{
10918 /* The lazy approach for now... */
10919 uint32_t const *pu32Src;
10920 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10921 if (rc == VINF_SUCCESS)
10922 {
10923 *pu32Dst = *pu32Src;
10924 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10925 }
10926 return rc;
10927}
10928
10929
10930/**
10931 * Fetches a system table qword.
10932 *
10933 * @returns Strict VBox status code.
10934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10935 * @param pu64Dst Where to return the qword.
10936 * @param iSegReg The index of the segment register to use for
10937 * this access. The base and limits are checked.
10938 * @param GCPtrMem The address of the guest memory.
10939 */
10940IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10941{
10942 /* The lazy approach for now... */
10943 uint64_t const *pu64Src;
10944 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10945 if (rc == VINF_SUCCESS)
10946 {
10947 *pu64Dst = *pu64Src;
10948 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10949 }
10950 return rc;
10951}
10952
10953
10954/**
10955 * Fetches a descriptor table entry with caller specified error code.
10956 *
10957 * @returns Strict VBox status code.
10958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10959 * @param pDesc Where to return the descriptor table entry.
10960 * @param uSel The selector whose table entry to fetch.
10961 * @param uXcpt The exception to raise on table lookup error.
10962 * @param uErrorCode The error code associated with the exception.
10963 */
10964IEM_STATIC VBOXSTRICTRC
10965iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10966{
10967 AssertPtr(pDesc);
10968 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10969
10970 /** @todo did the 286 require all 8 bytes to be accessible? */
10971 /*
10972 * Get the selector table base and check bounds.
10973 */
10974 RTGCPTR GCPtrBase;
10975 if (uSel & X86_SEL_LDT)
10976 {
10977 if ( !pCtx->ldtr.Attr.n.u1Present
10978 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10979 {
10980 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10981 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10982 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10983 uErrorCode, 0);
10984 }
10985
10986 Assert(pCtx->ldtr.Attr.n.u1Present);
10987 GCPtrBase = pCtx->ldtr.u64Base;
10988 }
10989 else
10990 {
10991 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10992 {
10993 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10994 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10995 uErrorCode, 0);
10996 }
10997 GCPtrBase = pCtx->gdtr.pGdt;
10998 }
10999
11000 /*
11001 * Read the legacy descriptor and, if required, the long mode
11002 * extensions.
11003 */
11004 VBOXSTRICTRC rcStrict;
11005 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11006 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11007 else
11008 {
11009 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11010 if (rcStrict == VINF_SUCCESS)
11011 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11012 if (rcStrict == VINF_SUCCESS)
11013 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11014 if (rcStrict == VINF_SUCCESS)
11015 pDesc->Legacy.au16[3] = 0;
11016 else
11017 return rcStrict;
11018 }
11019
11020 if (rcStrict == VINF_SUCCESS)
11021 {
11022 if ( !IEM_IS_LONG_MODE(pVCpu)
11023 || pDesc->Legacy.Gen.u1DescType)
11024 pDesc->Long.au64[1] = 0;
11025 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
11026 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11027 else
11028 {
11029 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11030 /** @todo is this the right exception? */
11031 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11032 }
11033 }
11034 return rcStrict;
11035}
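/* Illustration of the long mode path above (worked arithmetic, not a code change):
 * (uSel | X86_SEL_RPL_LDT) + 1 equals (uSel & X86_SEL_MASK) + 8, i.e. the offset
 * of the upper half of the 16-byte system descriptor.
 * E.g. uSel=0x002b: (0x2b | 7) + 1 = 0x30 = 0x28 + 8. */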
11036
11037
11038/**
11039 * Fetches a descriptor table entry.
11040 *
11041 * @returns Strict VBox status code.
11042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11043 * @param pDesc Where to return the descriptor table entry.
11044 * @param uSel The selector whose table entry to fetch.
11045 * @param uXcpt The exception to raise on table lookup error.
11046 */
11047IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11048{
11049 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11050}
11051
11052
11053/**
11054 * Fakes a long mode stack selector for SS = 0.
11055 *
11056 * @param pDescSs Where to return the fake stack descriptor.
11057 * @param uDpl The DPL we want.
11058 */
11059IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11060{
11061 pDescSs->Long.au64[0] = 0;
11062 pDescSs->Long.au64[1] = 0;
11063 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11064 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11065 pDescSs->Long.Gen.u2Dpl = uDpl;
11066 pDescSs->Long.Gen.u1Present = 1;
11067 pDescSs->Long.Gen.u1Long = 1;
11068}
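/* Background note: in 64-bit mode the CPU permits a null SS at CPL 0 through 2
 * and treats the stack as flat and writable, so code paths that still want a
 * descriptor to inspect presumably use this fake one rather than special casing
 * the null selector everywhere. */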
11069
11070
11071/**
11072 * Marks the selector descriptor as accessed (only non-system descriptors).
11073 *
11074 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11075 * will therefore skip the limit checks.
11076 *
11077 * @returns Strict VBox status code.
11078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11079 * @param uSel The selector.
11080 */
11081IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11082{
11083 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11084
11085 /*
11086 * Get the selector table base and calculate the entry address.
11087 */
11088 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11089 ? pCtx->ldtr.u64Base
11090 : pCtx->gdtr.pGdt;
11091 GCPtr += uSel & X86_SEL_MASK;
11092
11093 /*
11094 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11095 * ugly stuff to avoid that. This also keeps the access atomic and more
11096 * or less removes any question about 8-bit vs 32-bit accesses.
11097 */
11098 VBOXSTRICTRC rcStrict;
11099 uint32_t volatile *pu32;
11100 if ((GCPtr & 3) == 0)
11101 {
11102 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11103 GCPtr += 2 + 2;
11104 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11105 if (rcStrict != VINF_SUCCESS)
11106 return rcStrict;
11107 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11108 }
11109 else
11110 {
11111 /* The misaligned GDT/LDT case, map the whole thing. */
11112 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11113 if (rcStrict != VINF_SUCCESS)
11114 return rcStrict;
11115 switch ((uintptr_t)pu32 & 3)
11116 {
11117 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11118 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11119 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11120 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11121 }
11122 }
11123
11124 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11125}
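/* Illustration of the misaligned case above: the accessed bit is bit 40 of the
 * 8-byte descriptor, i.e. bit 0 of byte 5.  If the mapping comes back with
 * ((uintptr_t)pu32 & 3) == 1, the case 1 branch uses the dword aligned pointer
 * (uint8_t *)pu32 + 3 and bit index 40 - 24 = 16, which lands on byte
 * 3 + 16/8 = 5 of the descriptor, bit 0: the same accessed bit, but set via a
 * naturally aligned atomic access. */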
11126
11127/** @} */
11128
11129
11130/*
11131 * Include the C/C++ implementation of the instructions.
11132 */
11133#include "IEMAllCImpl.cpp.h"
11134
11135
11136
11137/** @name "Microcode" macros.
11138 *
11139 * The idea is that we should be able to use the same code to interpret
11140 * instructions as well as to recompile them. Thus this obfuscation.
11141 *
11142 * @{
11143 */
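/* For orientation, a rough sketch of how an instruction body is expressed with
 * these microcode macros.  This is illustrative only (the real bodies live in
 * the IEMAllInstructions*.cpp.h templates, and IEM_MC_CALL_VOID_AIMPL_3 and
 * iemAImpl_add_u32 are defined/declared elsewhere in IEM), but a register to
 * register 32-bit ADD would look roughly like this:
 *
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_ARG(uint32_t *, pu32Dst,  0);
 *      IEM_MC_ARG(uint32_t,   u32Src,   1);
 *      IEM_MC_ARG(uint32_t *, pEFlags,  2);
 *      IEM_MC_FETCH_GREG_U32(u32Src, X86_GREG_xCX);
 *      IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *      IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */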
11144#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11145#define IEM_MC_END() }
11146#define IEM_MC_PAUSE() do {} while (0)
11147#define IEM_MC_CONTINUE() do {} while (0)
11148
11149/** Internal macro. */
11150#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11151 do \
11152 { \
11153 VBOXSTRICTRC rcStrict2 = a_Expr; \
11154 if (rcStrict2 != VINF_SUCCESS) \
11155 return rcStrict2; \
11156 } while (0)
11157
11158
11159#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11160#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11161#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11162#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11163#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11164#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11165#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11166#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11167#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11168 do { \
11169 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11170 return iemRaiseDeviceNotAvailable(pVCpu); \
11171 } while (0)
11172#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11173 do { \
11174 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11175 return iemRaiseDeviceNotAvailable(pVCpu); \
11176 } while (0)
11177#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11178 do { \
11179 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11180 return iemRaiseMathFault(pVCpu); \
11181 } while (0)
11182#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11183 do { \
11184 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11185 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11186 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11187 return iemRaiseUndefinedOpcode(pVCpu); \
11188 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11189 return iemRaiseDeviceNotAvailable(pVCpu); \
11190 } while (0)
11191#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11192 do { \
11193 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11194 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11195 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11196 return iemRaiseUndefinedOpcode(pVCpu); \
11197 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11198 return iemRaiseDeviceNotAvailable(pVCpu); \
11199 } while (0)
11200#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11201 do { \
11202 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11203 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11204 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11205 return iemRaiseUndefinedOpcode(pVCpu); \
11206 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11207 return iemRaiseDeviceNotAvailable(pVCpu); \
11208 } while (0)
11209#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11210 do { \
11211 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11212 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11213 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11214 return iemRaiseUndefinedOpcode(pVCpu); \
11215 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11216 return iemRaiseDeviceNotAvailable(pVCpu); \
11217 } while (0)
11218#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11219 do { \
11220 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11221 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11222 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11223 return iemRaiseUndefinedOpcode(pVCpu); \
11224 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11225 return iemRaiseDeviceNotAvailable(pVCpu); \
11226 } while (0)
11227#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11228 do { \
11229 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11230 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11231 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11232 return iemRaiseUndefinedOpcode(pVCpu); \
11233 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11234 return iemRaiseDeviceNotAvailable(pVCpu); \
11235 } while (0)
11236#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11237 do { \
11238 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11239 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11240 return iemRaiseUndefinedOpcode(pVCpu); \
11241 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11242 return iemRaiseDeviceNotAvailable(pVCpu); \
11243 } while (0)
11244#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11245 do { \
11246 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11247 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11248 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11249 return iemRaiseUndefinedOpcode(pVCpu); \
11250 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11251 return iemRaiseDeviceNotAvailable(pVCpu); \
11252 } while (0)
11253#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11254 do { \
11255 if (pVCpu->iem.s.uCpl != 0) \
11256 return iemRaiseGeneralProtectionFault0(pVCpu); \
11257 } while (0)
11258#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11259 do { \
11260 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11261 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11262 } while (0)
11263#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11264 do { \
11265 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11266 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11267 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_FSGSBASE)) \
11268 return iemRaiseUndefinedOpcode(pVCpu); \
11269 } while (0)
11270#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11271 do { \
11272 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11273 return iemRaiseGeneralProtectionFault0(pVCpu); \
11274 } while (0)
11275
11276
11277#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11278#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11279#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11280#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11281#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11282#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11283#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11284 uint32_t a_Name; \
11285 uint32_t *a_pName = &a_Name
11286#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11287 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
11288
11289#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11290#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11291
11292#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11293#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11294#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11295#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11296#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11297#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11298#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11299#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11300#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11301#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11302#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11303#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11304#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11305#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11306#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11307#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11308#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11309#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11310#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11311#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11312#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg));
11313#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg));
11314#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11315#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11316#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11317#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11318#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11319#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11320#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11321#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11322#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11323/** @note Not for IOPL or IF testing or modification. */
11324#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11325#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11326#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11327#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11328
11329#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11330#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11331#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11332#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11333#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11334#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11335#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11336#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11337#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11338#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11339#define IEM_MC_STORE_SREG_BASE_U64(a_iSeg, a_u64Value) *iemSRegBaseRefU64(pVCpu, (a_iSeg)) = (a_u64Value)
11340#define IEM_MC_STORE_SREG_BASE_U32(a_iSeg, a_u32Value) *iemSRegBaseRefU64(pVCpu, (a_iSeg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11341#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11342 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11343
11344
11345#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11346#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11347/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11348 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11349#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11350#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11351/** @note Not for IOPL or IF testing or modification. */
11352#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11353
11354#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11355#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11356#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11357 do { \
11358 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11359 *pu32Reg += (a_u32Value); \
11360 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11361 } while (0)
11362#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11363
11364#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11365#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11366#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11367 do { \
11368 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11369 *pu32Reg -= (a_u32Value); \
11370 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11371 } while (0)
11372#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11373#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= (a_u16Const); } while (0)
11374
11375#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11376#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11377#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11378#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11379#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11380#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11381#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11382
11383#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11384#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11385#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11386#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11387
11388#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11389#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11390#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11391
11392#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11393#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11394#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11395
11396#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11397#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11398#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11399
11400#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11401#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11402#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11403
11404#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11405
11406#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11407
11408#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11409#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11410#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11411 do { \
11412 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11413 *pu32Reg &= (a_u32Value); \
11414 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11415 } while (0)
11416#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11417
11418#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11419#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11420#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11421 do { \
11422 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11423 *pu32Reg |= (a_u32Value); \
11424 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11425 } while (0)
11426#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11427
11428
11429/** @note Not for IOPL or IF modification. */
11430#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11431/** @note Not for IOPL or IF modification. */
11432#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11433/** @note Not for IOPL or IF modification. */
11434#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11435
11436#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11437
11438/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0xffff) if necessary. */
11439#define IEM_MC_FPU_TO_MMX_MODE() do { \
11440 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11441 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11442 } while (0)
11443
11444/** Switches the FPU state from MMX mode (FTW=0xffff). */
11445#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11446 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0; \
11447 } while (0)
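/* Note on the FTW values above: the guest x87 state is kept in FXSAVE layout,
 * where the tag word is abridged to one bit per register (1 = valid, 0 = empty).
 * Entering MMX mode therefore marks all eight registers as valid (0xff, the
 * equivalent of a full FTW of 0xffff), while leaving it marks them all empty. */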
11448
11449#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11450 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11451#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11452 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11453#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11454 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11455 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11456 } while (0)
11457#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11458 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11459 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11460 } while (0)
11461#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11462 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11463#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11464 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11465#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11466 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11467
11468#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11469 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11470 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11471 } while (0)
11472#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11473 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11474#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11475 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11476#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11477 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11478#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11479 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11480 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11481 } while (0)
11482#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11483 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11484#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11485 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11486 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11487 } while (0)
11488#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11489 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11490#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11491 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11492 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11493 } while (0)
11494#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11495 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11496#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11497 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11498#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11499 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11500#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11501 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11502#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11503 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11504 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11505 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11506 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11507 } while (0)
11508
11509#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11510 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11511 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11512 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11513 } while (0)
11514#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11515 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11516 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11517 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11518 } while (0)
11519#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11520 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11521 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11522 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11523 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11524 } while (0)
11525#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11526 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11527 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11528 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11529 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11530 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11531 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11532 } while (0)
11533
11534#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11535#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11536 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11537 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11538 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11539 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11540 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11541 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11542 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11543 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11544 } while (0)
11545#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11546 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11547 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11548 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11549 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11550 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11551 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11552 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11553 } while (0)
11554#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11555 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11556 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11557 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11558 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11559 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11560 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11561 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11562 } while (0)
11563#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11564 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11565 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11566 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11567 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11568 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11569 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11570 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11571 } while (0)
11572
11573#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11574 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11575#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11576 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11577#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11578 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11579#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11580 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11581 uintptr_t const iYRegTmp = (a_iYReg); \
11582 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11583 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11584 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11585 } while (0)
11586
11587#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11588 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11589 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11590 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11591 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11592 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11593 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11594 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11595 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11596 } while (0)
11597#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11598 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11599 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11600 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11601 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11602 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11603 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11604 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11605 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11606 } while (0)
11607#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11608 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11609 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11610 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11611 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11612 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11613 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11614 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11615 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11616 } while (0)
11617
11618#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11619 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11620 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11621 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11622 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11623 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11624 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11625 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11626 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11627 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11628 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11629 } while (0)
11630#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11631 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11632 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11633 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11634 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11635 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11636 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11637 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11638 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11639 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11640 } while (0)
11641#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11642 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11643 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11644 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11645 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11646 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11647 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11648 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11649 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11650 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11651 } while (0)
11652#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11653 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11654 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11655 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11656 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11657 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11658 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11659 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11660 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11661 } while (0)
11662
11663#ifndef IEM_WITH_SETJMP
11664# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11665 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11666# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11667 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11668# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11669 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11670#else
11671# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11672 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11673# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11674 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11675# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11676 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11677#endif
11678
11679#ifndef IEM_WITH_SETJMP
11680# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11681 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11682# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11683 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11684# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11685 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11686#else
11687# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11688 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11689# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11690 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11691# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11692 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11693#endif
11694
11695#ifndef IEM_WITH_SETJMP
11696# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11697 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11698# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11699 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11700# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11701 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11702#else
11703# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11704 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11705# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11706 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11707# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11708 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11709#endif
11710
11711#ifdef SOME_UNUSED_FUNCTION
11712# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11713 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11714#endif
11715
11716#ifndef IEM_WITH_SETJMP
11717# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11718 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11719# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11720 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11721# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11722 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11723# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11724 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11725#else
11726# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11727 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11728# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11729 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11730# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11731 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11732# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11733 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11734#endif
11735
11736#ifndef IEM_WITH_SETJMP
11737# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11738 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11739# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11740 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11741# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11742 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11743#else
11744# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11745 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11746# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11747 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11748# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11749 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11750#endif
11751
11752#ifndef IEM_WITH_SETJMP
11753# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11754 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11755# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11756 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11757#else
11758# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11759 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11760# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11761 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11762#endif
11763
11764#ifndef IEM_WITH_SETJMP
11765# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11766 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11767# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11768 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11769#else
11770# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11771 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11772# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11773 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11774#endif
11775
11776
11777
11778#ifndef IEM_WITH_SETJMP
11779# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11780 do { \
11781 uint8_t u8Tmp; \
11782 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11783 (a_u16Dst) = u8Tmp; \
11784 } while (0)
11785# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11786 do { \
11787 uint8_t u8Tmp; \
11788 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11789 (a_u32Dst) = u8Tmp; \
11790 } while (0)
11791# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11792 do { \
11793 uint8_t u8Tmp; \
11794 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11795 (a_u64Dst) = u8Tmp; \
11796 } while (0)
11797# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11798 do { \
11799 uint16_t u16Tmp; \
11800 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11801 (a_u32Dst) = u16Tmp; \
11802 } while (0)
11803# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11804 do { \
11805 uint16_t u16Tmp; \
11806 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11807 (a_u64Dst) = u16Tmp; \
11808 } while (0)
11809# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11810 do { \
11811 uint32_t u32Tmp; \
11812 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11813 (a_u64Dst) = u32Tmp; \
11814 } while (0)
11815#else /* IEM_WITH_SETJMP */
11816# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11817 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11818# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11819 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11820# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11821 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11822# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11823 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11824# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11825 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11826# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11827 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11828#endif /* IEM_WITH_SETJMP */
11829
11830#ifndef IEM_WITH_SETJMP
11831# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11832 do { \
11833 uint8_t u8Tmp; \
11834 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11835 (a_u16Dst) = (int8_t)u8Tmp; \
11836 } while (0)
11837# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11838 do { \
11839 uint8_t u8Tmp; \
11840 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11841 (a_u32Dst) = (int8_t)u8Tmp; \
11842 } while (0)
11843# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11844 do { \
11845 uint8_t u8Tmp; \
11846 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11847 (a_u64Dst) = (int8_t)u8Tmp; \
11848 } while (0)
11849# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11850 do { \
11851 uint16_t u16Tmp; \
11852 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11853 (a_u32Dst) = (int16_t)u16Tmp; \
11854 } while (0)
11855# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11856 do { \
11857 uint16_t u16Tmp; \
11858 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11859 (a_u64Dst) = (int16_t)u16Tmp; \
11860 } while (0)
11861# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11862 do { \
11863 uint32_t u32Tmp; \
11864 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11865 (a_u64Dst) = (int32_t)u32Tmp; \
11866 } while (0)
11867#else /* IEM_WITH_SETJMP */
11868# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11869 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11870# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11871 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11872# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11873 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11874# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11875 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11876# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11877 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11878# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11879 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11880#endif /* IEM_WITH_SETJMP */
11881
11882#ifndef IEM_WITH_SETJMP
11883# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11884 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11885# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11886 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11887# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11888 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11889# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11890 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11891#else
11892# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11893 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11894# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11895 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11896# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11897 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11898# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11899 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11900#endif
11901
11902#ifndef IEM_WITH_SETJMP
11903# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11904 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11905# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11906 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11907# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11908 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11909# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11910 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11911#else
11912# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11913 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11914# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11915 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11916# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11917 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11918# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11919 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11920#endif
11921
11922#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11923#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11924#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11925#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11926#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11927#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11928#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11929 do { \
11930 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11931 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11932 } while (0)
11933
11934#ifndef IEM_WITH_SETJMP
11935# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11936 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11937# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11938 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11939#else
11940# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11941 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11942# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11943 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11944#endif
11945
11946#ifndef IEM_WITH_SETJMP
11947# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11948 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11949# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11950 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11951#else
11952# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11953 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11954# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11955 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11956#endif
11957
11958
11959#define IEM_MC_PUSH_U16(a_u16Value) \
11960 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11961#define IEM_MC_PUSH_U32(a_u32Value) \
11962 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11963#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11964 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11965#define IEM_MC_PUSH_U64(a_u64Value) \
11966 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11967
11968#define IEM_MC_POP_U16(a_pu16Value) \
11969 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11970#define IEM_MC_POP_U32(a_pu32Value) \
11971 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11972#define IEM_MC_POP_U64(a_pu64Value) \
11973 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11974
11975/** Maps guest memory for direct or bounce buffered access.
11976 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11977 * @remarks May return.
11978 */
11979#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11980 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11981
11982/** Maps guest memory for direct or bounce buffered access.
11983 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11984 * @remarks May return.
11985 */
11986#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11987 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11988
11989/** Commits the memory and unmaps the guest memory.
11990 * @remarks May return.
11991 */
11992#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11993 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11994
11995/** Commits the memory and unmaps the guest memory unless the FPU status word
11996 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11997 * would cause FLD not to store.
11998 *
11999 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12000 * store, while \#P will not.
12001 *
12002 * @remarks May in theory return - for now.
12003 */
12004#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12005 do { \
12006 if ( !(a_u16FSW & X86_FSW_ES) \
12007 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12008 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
12009 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12010 } while (0)
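
/*
 * The commit condition above, restated as a standalone predicate purely for
 * illustration (the helper name and plain uint16_t parameters are invented;
 * the X86_FSW_* and X86_FCW_MASK_ALL constants are the iprt/x86.h ones the
 * macro itself uses, where the FCW mask bits line up with the FSW flag bits):
 *
 * @code
 *  static bool iemExampleFpuStoreWouldCommit(uint16_t u16Fsw, uint16_t u16Fcw)
 *  {
 *      // No exception summary flag -> nothing pending, always commit.
 *      if (!(u16Fsw & X86_FSW_ES))
 *          return true;
 *      // Otherwise only commit when none of #I, #O, #U is pending and unmasked.
 *      uint16_t const fPending  = u16Fsw & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE);
 *      uint16_t const fUnmasked = fPending & (uint16_t)~(u16Fcw & X86_FCW_MASK_ALL);
 *      return fUnmasked == 0;
 *  }
 * @endcode
 */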
12011
12012/** Calculate efficient address from R/M. */
12013#ifndef IEM_WITH_SETJMP
12014# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12015 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12016#else
12017# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12018 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12019#endif
12020
12021#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12022#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12023#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12024#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12025#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12026#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12027#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12028
12029/**
12030 * Defers the rest of the instruction emulation to a C implementation routine
12031 * and returns, only taking the standard parameters.
12032 *
12033 * @param a_pfnCImpl The pointer to the C routine.
12034 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12035 */
12036#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12037
12038/**
12039 * Defers the rest of the instruction emulation to a C implementation routine and
12040 * returns, taking one argument in addition to the standard ones.
12041 *
12042 * @param a_pfnCImpl The pointer to the C routine.
12043 * @param a0 The argument.
12044 */
12045#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12046
12047/**
12048 * Defers the rest of the instruction emulation to a C implementation routine
12049 * and returns, taking two arguments in addition to the standard ones.
12050 *
12051 * @param a_pfnCImpl The pointer to the C routine.
12052 * @param a0 The first extra argument.
12053 * @param a1 The second extra argument.
12054 */
12055#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12056
12057/**
12058 * Defers the rest of the instruction emulation to a C implementation routine
12059 * and returns, taking three arguments in addition to the standard ones.
12060 *
12061 * @param a_pfnCImpl The pointer to the C routine.
12062 * @param a0 The first extra argument.
12063 * @param a1 The second extra argument.
12064 * @param a2 The third extra argument.
12065 */
12066#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12067
12068/**
12069 * Defers the rest of the instruction emulation to a C implementation routine
12070 * and returns, taking four arguments in addition to the standard ones.
12071 *
12072 * @param a_pfnCImpl The pointer to the C routine.
12073 * @param a0 The first extra argument.
12074 * @param a1 The second extra argument.
12075 * @param a2 The third extra argument.
12076 * @param a3 The fourth extra argument.
12077 */
12078#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12079
12080/**
12081 * Defers the rest of the instruction emulation to a C implementation routine
12082 * and returns, taking five arguments in addition to the standard ones.
12083 *
12084 * @param a_pfnCImpl The pointer to the C routine.
12085 * @param a0 The first extra argument.
12086 * @param a1 The second extra argument.
12087 * @param a2 The third extra argument.
12088 * @param a3 The fourth extra argument.
12089 * @param a4 The fifth extra argument.
12090 */
12091#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12092
12093/**
12094 * Defers the entire instruction emulation to a C implementation routine and
12095 * returns, only taking the standard parameters.
12096 *
12097 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12098 *
12099 * @param a_pfnCImpl The pointer to the C routine.
12100 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12101 */
12102#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12103
12104/**
12105 * Defers the entire instruction emulation to a C implementation routine and
12106 * returns, taking one argument in addition to the standard ones.
12107 *
12108 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12109 *
12110 * @param a_pfnCImpl The pointer to the C routine.
12111 * @param a0 The argument.
12112 */
12113#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12114
12115/**
12116 * Defers the entire instruction emulation to a C implementation routine and
12117 * returns, taking two arguments in addition to the standard ones.
12118 *
12119 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12120 *
12121 * @param a_pfnCImpl The pointer to the C routine.
12122 * @param a0 The first extra argument.
12123 * @param a1 The second extra argument.
12124 */
12125#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12126
12127/**
12128 * Defers the entire instruction emulation to a C implementation routine and
12129 * returns, taking three arguments in addition to the standard ones.
12130 *
12131 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12132 *
12133 * @param a_pfnCImpl The pointer to the C routine.
12134 * @param a0 The first extra argument.
12135 * @param a1 The second extra argument.
12136 * @param a2 The third extra argument.
12137 */
12138#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
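
/*
 * A hedged usage sketch of the defer-to-C-implementation macros: a decoder
 * stub hands the whole instruction to a C worker and returns its strict
 * status code.  The handler, worker and statistics names below are invented
 * for the example; real decoders use existing iemOp_* / iemCImpl_* pairs.
 *
 * @code
 *  FNIEMOP_DEF(iemOp_Example)
 *  {
 *      IEMOP_MNEMONIC(example, "example");            // hypothetical stats member
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();      // lock prefix -> #UD
 *      // No IEM_MC_BEGIN/IEM_MC_END around this one; the C worker does it all.
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_Example);
 *  }
 * @endcode
 */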
12139
12140/**
12141 * Calls a FPU assembly implementation taking one visible argument.
12142 *
12143 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12144 * @param a0 The first extra argument.
12145 */
12146#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12147 do { \
12148 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
12149 } while (0)
12150
12151/**
12152 * Calls a FPU assembly implementation taking two visible arguments.
12153 *
12154 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12155 * @param a0 The first extra argument.
12156 * @param a1 The second extra argument.
12157 */
12158#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12159 do { \
12160 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12161 } while (0)
12162
12163/**
12164 * Calls a FPU assembly implementation taking three visible arguments.
12165 *
12166 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12167 * @param a0 The first extra argument.
12168 * @param a1 The second extra argument.
12169 * @param a2 The third extra argument.
12170 */
12171#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12172 do { \
12173 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12174 } while (0)
12175
12176#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12177 do { \
12178 (a_FpuData).FSW = (a_FSW); \
12179 (a_FpuData).r80Result = *(a_pr80Value); \
12180 } while (0)
12181
12182/** Pushes FPU result onto the stack. */
12183#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12184 iemFpuPushResult(pVCpu, &a_FpuData)
12185/** Pushes FPU result onto the stack and sets the FPUDP. */
12186#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12187 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12188
12189/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12190#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12191 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12192
12193/** Stores FPU result in a stack register. */
12194#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12195 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12196/** Stores FPU result in a stack register and pops the stack. */
12197#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12198 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12199/** Stores FPU result in a stack register and sets the FPUDP. */
12200#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12201 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12202/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12203 * stack. */
12204#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12205 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12206
12207/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12208#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12209 iemFpuUpdateOpcodeAndIp(pVCpu)
12210/** Free a stack register (for FFREE and FFREEP). */
12211#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12212 iemFpuStackFree(pVCpu, a_iStReg)
12213/** Increment the FPU stack pointer. */
12214#define IEM_MC_FPU_STACK_INC_TOP() \
12215 iemFpuStackIncTop(pVCpu)
12216/** Decrement the FPU stack pointer. */
12217#define IEM_MC_FPU_STACK_DEC_TOP() \
12218 iemFpuStackDecTop(pVCpu)
12219
12220/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12221#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12222 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12223/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12224#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12225 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12226/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12227#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12228 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12229/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12230#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12231 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12232/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12233 * stack. */
12234#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12235 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12236/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12237#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12238 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12239
12240/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12241#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12242 iemFpuStackUnderflow(pVCpu, a_iStDst)
12243/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12244 * stack. */
12245#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12246 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12247/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12248 * FPUDS. */
12249#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12250 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12251/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12252 * FPUDS. Pops stack. */
12253#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12254 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12255/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12256 * stack twice. */
12257#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12258 iemFpuStackUnderflowThenPopPop(pVCpu)
12259/** Raises a FPU stack underflow exception for an instruction pushing a result
12260 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12261#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12262 iemFpuStackPushUnderflow(pVCpu)
12263/** Raises a FPU stack underflow exception for an instruction pushing a result
12264 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12265#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12266 iemFpuStackPushUnderflowTwo(pVCpu)
12267
12268/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12269 * FPUIP, FPUCS and FOP. */
12270#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12271 iemFpuStackPushOverflow(pVCpu)
12272/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12273 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12274#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12275 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12276/** Prepares for using the FPU state.
12277 * Ensures that we can use the host FPU in the current context (RC+R0).
12278 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12279#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12280/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12281#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12282/** Actualizes the guest FPU state so it can be accessed and modified. */
12283#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12284
12285/** Prepares for using the SSE state.
12286 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12287 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12288#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12289/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12290#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12291/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12292#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12293
12294/** Prepares for using the AVX state.
12295 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12296 * Ensures the guest AVX state in the CPUMCTX is up to date.
12297 * @note This will include the AVX512 state too when support for it is added,
12298 * due to the zero-extending behaviour of VEX encoded instructions. */
12299#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12300/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12301#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12302/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12303#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12304
12305/**
12306 * Calls a MMX assembly implementation taking two visible arguments.
12307 *
12308 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12309 * @param a0 The first extra argument.
12310 * @param a1 The second extra argument.
12311 */
12312#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12313 do { \
12314 IEM_MC_PREPARE_FPU_USAGE(); \
12315 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12316 } while (0)
12317
12318/**
12319 * Calls a MMX assembly implementation taking three visible arguments.
12320 *
12321 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12322 * @param a0 The first extra argument.
12323 * @param a1 The second extra argument.
12324 * @param a2 The third extra argument.
12325 */
12326#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12327 do { \
12328 IEM_MC_PREPARE_FPU_USAGE(); \
12329 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12330 } while (0)
12331
12332
12333/**
12334 * Calls a SSE assembly implementation taking two visible arguments.
12335 *
12336 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12337 * @param a0 The first extra argument.
12338 * @param a1 The second extra argument.
12339 */
12340#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12341 do { \
12342 IEM_MC_PREPARE_SSE_USAGE(); \
12343 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12344 } while (0)
12345
12346/**
12347 * Calls a SSE assembly implementation taking three visible arguments.
12348 *
12349 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12350 * @param a0 The first extra argument.
12351 * @param a1 The second extra argument.
12352 * @param a2 The third extra argument.
12353 */
12354#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12355 do { \
12356 IEM_MC_PREPARE_SSE_USAGE(); \
12357 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12358 } while (0)
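
/*
 * A condensed usage sketch for the SSE call macros above, modelled on the
 * register-to-register form of a two-operand media instruction.  Exception
 * checks are trimmed, bRm is the ModR/M byte already fetched by the decoder,
 * and pfnAImpl stands for whichever iemAImpl_* worker the instruction uses.
 *
 * @code
 *  IEM_MC_BEGIN(2, 0);
 *  IEM_MC_ARG(PRTUINT128U,  pDst, 0);
 *  IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
 *  IEM_MC_PREPARE_SSE_USAGE();
 *  IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_MASK) | pVCpu->iem.s.uRexReg);
 *  IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *  IEM_MC_CALL_SSE_AIMPL_2(pfnAImpl, pDst, pSrc);
 *  IEM_MC_ADVANCE_RIP();
 *  IEM_MC_END();
 * @endcode
 */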
12359
12360
12361/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12362 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12363#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12364 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState), 0)
12365
12366/**
12367 * Calls a AVX assembly implementation taking two visible arguments.
12368 *
12369 * There is one implicit zero'th argument, a pointer to the extended state.
12370 *
12371 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12372 * @param a1 The first extra argument.
12373 * @param a2 The second extra argument.
12374 */
12375#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12376 do { \
12377 IEM_MC_PREPARE_AVX_USAGE(); \
12378 a_pfnAImpl(pXState, (a1), (a2)); \
12379 } while (0)
12380
12381/**
12382 * Calls a AVX assembly implementation taking three visible arguments.
12383 *
12384 * There is one implicit zero'th argument, a pointer to the extended state.
12385 *
12386 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12387 * @param a1 The first extra argument.
12388 * @param a2 The second extra argument.
12389 * @param a3 The third extra argument.
12390 */
12391#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12392 do { \
12393 IEM_MC_PREPARE_AVX_USAGE(); \
12394 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12395 } while (0)
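
/*
 * A condensed sketch of how the implicit extended-state argument is wired up
 * for the AVX call macros: IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() declares pXState
 * as argument 0, and IEM_MC_CALL_AVX_AIMPL_2/3 pass it along implicitly.
 * Exception checks are trimmed; iDstValue/iSrcValue stand for register
 * indices the decoder has already worked out, and pfnAImpl for the worker.
 *
 * @code
 *  IEM_MC_BEGIN(3, 0);
 *  IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();                   // argument 0: pXState
 *  IEM_MC_ARG_CONST(uint8_t, iYRegDst, iDstValue, 1);  // iDstValue: decoded reg index (assumed)
 *  IEM_MC_ARG_CONST(uint8_t, iYRegSrc, iSrcValue, 2);  // iSrcValue: decoded r/m index (assumed)
 *  IEM_MC_PREPARE_AVX_USAGE();
 *  IEM_MC_CALL_AVX_AIMPL_2(pfnAImpl, iYRegDst, iYRegSrc);
 *  IEM_MC_ADVANCE_RIP();
 *  IEM_MC_END();
 * @endcode
 */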
12396
12397/** @note Not for IOPL or IF testing. */
12398#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
12399/** @note Not for IOPL or IF testing. */
12400#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
12401/** @note Not for IOPL or IF testing. */
12402#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
12403/** @note Not for IOPL or IF testing. */
12404#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
12405/** @note Not for IOPL or IF testing. */
12406#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12407 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12408 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12409/** @note Not for IOPL or IF testing. */
12410#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12411 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12412 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12413/** @note Not for IOPL or IF testing. */
12414#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12415 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12416 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12417 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12418/** @note Not for IOPL or IF testing. */
12419#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12420 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12421 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12422 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12423#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
12424#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
12425#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
12426/** @note Not for IOPL or IF testing. */
12427#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12428 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12429 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12430/** @note Not for IOPL or IF testing. */
12431#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12432 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12433 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12434/** @note Not for IOPL or IF testing. */
12435#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12436 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12437 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12438/** @note Not for IOPL or IF testing. */
12439#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12440 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12441 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12442/** @note Not for IOPL or IF testing. */
12443#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12444 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12445 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12446/** @note Not for IOPL or IF testing. */
12447#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12448 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12449 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12450#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12451#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12452
12453#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12454 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12455#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12456 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12457#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12458 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12459#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12460 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12461#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12462 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12463#define IEM_MC_IF_FCW_IM() \
12464 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12465
12466#define IEM_MC_ELSE() } else {
12467#define IEM_MC_ENDIF() } do {} while (0)
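
/*
 * A condensed sketch tying the FPU helpers together, modelled on the
 * ST(0),ST(i) arithmetic handlers: prepare the FPU, reference both stack
 * registers if present, run the assembly worker and store the result, or
 * raise a stack underflow otherwise.  Decode details are trimmed, bRm is the
 * ModR/M byte already fetched, and pfnAImpl stands for a worker such as
 * iemAImpl_fadd_r80_by_r80.
 *
 * @code
 *  IEM_MC_BEGIN(3, 1);
 *  IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
 *  IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *  IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
 *  IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
 *  IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *  IEM_MC_PREPARE_FPU_USAGE();
 *  IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
 *      IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
 *      IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *  IEM_MC_ELSE()
 *      IEM_MC_FPU_STACK_UNDERFLOW(0);
 *  IEM_MC_ENDIF();
 *  IEM_MC_ADVANCE_RIP();
 *  IEM_MC_END();
 * @endcode
 */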
12468
12469/** @} */
12470
12471
12472/** @name Opcode Debug Helpers.
12473 * @{
12474 */
12475#ifdef VBOX_WITH_STATISTICS
12476# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12477#else
12478# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12479#endif
12480
12481#ifdef DEBUG
12482# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12483 do { \
12484 IEMOP_INC_STATS(a_Stats); \
12485 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
12486 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12487 } while (0)
12488
12489# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12490 do { \
12491 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12492 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12493 (void)RT_CONCAT(OP_,a_Upper); \
12494 (void)(a_fDisHints); \
12495 (void)(a_fIemHints); \
12496 } while (0)
12497
12498# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12499 do { \
12500 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12501 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12502 (void)RT_CONCAT(OP_,a_Upper); \
12503 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12504 (void)(a_fDisHints); \
12505 (void)(a_fIemHints); \
12506 } while (0)
12507
12508# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12509 do { \
12510 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12511 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12512 (void)RT_CONCAT(OP_,a_Upper); \
12513 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12514 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12515 (void)(a_fDisHints); \
12516 (void)(a_fIemHints); \
12517 } while (0)
12518
12519# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12520 do { \
12521 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12522 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12523 (void)RT_CONCAT(OP_,a_Upper); \
12524 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12525 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12526 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12527 (void)(a_fDisHints); \
12528 (void)(a_fIemHints); \
12529 } while (0)
12530
12531# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12532 do { \
12533 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12534 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12535 (void)RT_CONCAT(OP_,a_Upper); \
12536 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12537 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12538 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12539 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12540 (void)(a_fDisHints); \
12541 (void)(a_fIemHints); \
12542 } while (0)
12543
12544#else
12545# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12546
12547# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12548 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12549# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12550 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12551# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12552 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12553# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12554 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12555# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12556 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12557
12558#endif
12559
12560#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12561 IEMOP_MNEMONIC0EX(a_Lower, \
12562 #a_Lower, \
12563 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12564#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12565 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12566 #a_Lower " " #a_Op1, \
12567 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12568#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12569 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12570 #a_Lower " " #a_Op1 "," #a_Op2, \
12571 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12572#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12573 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12574 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12575 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12576#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12577 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12578 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12579 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
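
/*
 * A hedged illustration of the mnemonic wrappers above: an opcode decoder
 * typically starts with one of them, e.g. a two-operand form as below.  The
 * constants come from the existing IEMOPFORM_/OP_/OP_PARM_/DISOPTYPE_
 * families; the exact invocation shown is only an example.
 *
 * @code
 *  // Inside the decoder for an "add Eb,Gb" style encoding:
 *  IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, 0);
 *  // With VBOX_WITH_STATISTICS this bumps the add_Eb_Gb counter; in DEBUG
 *  // builds it also emits a Log4 decode line with cs:rip and "add Eb,Gb".
 * @endcode
 */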
12580
12581/** @} */
12582
12583
12584/** @name Opcode Helpers.
12585 * @{
12586 */
12587
12588#ifdef IN_RING3
12589# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12590 do { \
12591 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12592 else \
12593 { \
12594 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12595 return IEMOP_RAISE_INVALID_OPCODE(); \
12596 } \
12597 } while (0)
12598#else
12599# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12600 do { \
12601 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12602 else return IEMOP_RAISE_INVALID_OPCODE(); \
12603 } while (0)
12604#endif
12605
12606/** The instruction requires a 186 or later. */
12607#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12608# define IEMOP_HLP_MIN_186() do { } while (0)
12609#else
12610# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12611#endif
12612
12613/** The instruction requires a 286 or later. */
12614#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12615# define IEMOP_HLP_MIN_286() do { } while (0)
12616#else
12617# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12618#endif
12619
12620/** The instruction requires a 386 or later. */
12621#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12622# define IEMOP_HLP_MIN_386() do { } while (0)
12623#else
12624# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12625#endif
12626
12627/** The instruction requires a 386 or later if the given expression is true. */
12628#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12629# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12630#else
12631# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12632#endif
12633
12634/** The instruction requires a 486 or later. */
12635#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12636# define IEMOP_HLP_MIN_486() do { } while (0)
12637#else
12638# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12639#endif
12640
12641/** The instruction requires a Pentium (586) or later. */
12642#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12643# define IEMOP_HLP_MIN_586() do { } while (0)
12644#else
12645# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12646#endif
12647
12648/** The instruction requires a PentiumPro (686) or later. */
12649#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12650# define IEMOP_HLP_MIN_686() do { } while (0)
12651#else
12652# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12653#endif
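
/*
 * A hedged usage sketch for the minimum-CPU helpers: a decoder for a 186+
 * only instruction guards the rest of decoding so pre-186 target CPUs get
 * an invalid opcode (plus a debugger stop in ring-3 builds).  The handler
 * name and body are abbreviated for illustration.
 *
 * @code
 *  FNIEMOP_DEF(iemOp_Example186Insn)
 *  {
 *      IEMOP_HLP_MIN_186();        // #UD on pre-186 target CPUs
 *      IEMOP_HLP_NO_64BIT();       // also invalid in long mode in this example
 *      // ... normal IEM_MC_* body follows ...
 *      return VINF_SUCCESS;
 *  }
 * @endcode
 */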
12654
12655
12656/** The instruction raises an \#UD in real and V8086 mode. */
12657#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12658 do \
12659 { \
12660 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12661 else return IEMOP_RAISE_INVALID_OPCODE(); \
12662 } while (0)
12663
12664/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12665 * 64-bit mode. */
12666#define IEMOP_HLP_NO_64BIT() \
12667 do \
12668 { \
12669 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12670 return IEMOP_RAISE_INVALID_OPCODE(); \
12671 } while (0)
12672
12673/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12674 * 64-bit mode. */
12675#define IEMOP_HLP_ONLY_64BIT() \
12676 do \
12677 { \
12678 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12679 return IEMOP_RAISE_INVALID_OPCODE(); \
12680 } while (0)
12681
12682/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12683#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12684 do \
12685 { \
12686 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12687 iemRecalEffOpSize64Default(pVCpu); \
12688 } while (0)
12689
12690/** The instruction has 64-bit operand size if 64-bit mode. */
12691#define IEMOP_HLP_64BIT_OP_SIZE() \
12692 do \
12693 { \
12694 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12695 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12696 } while (0)
12697
12698/** Only a REX prefix immediately preceding the first opcode byte takes
12699 * effect. This macro helps ensure this as well as logging bad guest code. */
12700#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12701 do \
12702 { \
12703 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12704 { \
12705 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12706 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12707 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12708 pVCpu->iem.s.uRexB = 0; \
12709 pVCpu->iem.s.uRexIndex = 0; \
12710 pVCpu->iem.s.uRexReg = 0; \
12711 iemRecalEffOpSize(pVCpu); \
12712 } \
12713 } while (0)
12714
12715/**
12716 * Done decoding.
12717 */
12718#define IEMOP_HLP_DONE_DECODING() \
12719 do \
12720 { \
12721 /*nothing for now, maybe later... */ \
12722 } while (0)
12723
12724/**
12725 * Done decoding, raise \#UD exception if lock prefix present.
12726 */
12727#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12728 do \
12729 { \
12730 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12731 { /* likely */ } \
12732 else \
12733 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12734 } while (0)
12735
12736
12737/**
12738 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12739 * repnz or size prefixes are present, or if in real or v8086 mode.
12740 */
12741#define IEMOP_HLP_DONE_VEX_DECODING() \
12742 do \
12743 { \
12744 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12745 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12746 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12747 { /* likely */ } \
12748 else \
12749 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12750 } while (0)
12751
12752/**
12753 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12754 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not 0.
12755 */
12756#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12757 do \
12758 { \
12759 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12760 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12761 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12762 && pVCpu->iem.s.uVexLength == 0)) \
12763 { /* likely */ } \
12764 else \
12765 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12766 } while (0)
12767
12768
12769/**
12770 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12771 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12772 * register 0, or if in real or v8086 mode.
12773 */
12774#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12775 do \
12776 { \
12777 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12778 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12779 && !pVCpu->iem.s.uVex3rdReg \
12780 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12781 { /* likely */ } \
12782 else \
12783 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12784 } while (0)
12785
12786/**
12787 * Done decoding VEX, no V, L=0.
12788 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12789 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12790 */
12791#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12792 do \
12793 { \
12794 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12795 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12796 && pVCpu->iem.s.uVexLength == 0 \
12797 && pVCpu->iem.s.uVex3rdReg == 0 \
12798 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12799 { /* likely */ } \
12800 else \
12801 return IEMOP_RAISE_INVALID_OPCODE(); \
12802 } while (0)
12803
12804#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12805 do \
12806 { \
12807 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12808 { /* likely */ } \
12809 else \
12810 { \
12811 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12812 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12813 } \
12814 } while (0)
12815#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12816 do \
12817 { \
12818 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12819 { /* likely */ } \
12820 else \
12821 { \
12822 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12823 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12824 } \
12825 } while (0)
12826
12827/**
12828 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12829 * are present.
12830 */
12831#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12832 do \
12833 { \
12834 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12835 { /* likely */ } \
12836 else \
12837 return IEMOP_RAISE_INVALID_OPCODE(); \
12838 } while (0)
12839
12840
12841#ifdef VBOX_WITH_NESTED_HWVIRT
12842/** Checks and handles SVM nested-guest control & instruction intercepts. */
12843# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12844 do \
12845 { \
12846 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12847 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12848 } while (0)
12849
12850/** Checks and handles SVM nested-guest CR read intercepts. */
12851# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12852 do \
12853 { \
12854 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12855 IEM_RETURN_SVM_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12856 } while (0)
12857
12858#else /* !VBOX_WITH_NESTED_HWVIRT */
12859# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12860# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12861#endif /* !VBOX_WITH_NESTED_HWVIRT */
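
/*
 * A hedged usage sketch for the SVM intercept helpers: an interceptable
 * instruction checks the nested-guest control intercept early in its decoder,
 * producing a \#VMEXIT when the intercept is active.  The RDTSC intercept and
 * exit-code constants are assumed to be the usual ones from the SVM headers;
 * the surrounding handler is abbreviated.
 *
 * @code
 *  // Early in an RDTSC-style decoder, after prefix/lock checks:
 *  IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSC, SVM_EXIT_RDTSC, 0, 0);
 *  // Falls through when the intercept is not set (and compiles to nothing
 *  // without VBOX_WITH_NESTED_HWVIRT); otherwise returns the #VMEXIT status.
 * @endcode
 */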
12862
12863
12864/**
12865 * Calculates the effective address of a ModR/M memory operand.
12866 *
12867 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12868 *
12869 * @return Strict VBox status code.
12870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12871 * @param bRm The ModRM byte.
12872 * @param cbImm The size of any immediate following the
12873 * effective address opcode bytes. Important for
12874 * RIP relative addressing.
12875 * @param pGCPtrEff Where to return the effective address.
12876 */
12877IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12878{
12879 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12880 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12881# define SET_SS_DEF() \
12882 do \
12883 { \
12884 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12885 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12886 } while (0)
12887
12888 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12889 {
12890/** @todo Check the effective address size crap! */
12891 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12892 {
12893 uint16_t u16EffAddr;
12894
12895 /* Handle the disp16 form with no registers first. */
12896 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12897 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12898 else
12899 {
12900                /* Get the displacement. */
12901 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12902 {
12903 case 0: u16EffAddr = 0; break;
12904 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12905 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12906 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12907 }
12908
12909 /* Add the base and index registers to the disp. */
12910 switch (bRm & X86_MODRM_RM_MASK)
12911 {
12912 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12913 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12914 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12915 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12916 case 4: u16EffAddr += pCtx->si; break;
12917 case 5: u16EffAddr += pCtx->di; break;
12918 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12919 case 7: u16EffAddr += pCtx->bx; break;
12920 }
12921 }
12922
12923 *pGCPtrEff = u16EffAddr;
12924 }
12925 else
12926 {
12927 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12928 uint32_t u32EffAddr;
12929
12930 /* Handle the disp32 form with no registers first. */
12931 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12932 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12933 else
12934 {
12935 /* Get the register (or SIB) value. */
12936 switch ((bRm & X86_MODRM_RM_MASK))
12937 {
12938 case 0: u32EffAddr = pCtx->eax; break;
12939 case 1: u32EffAddr = pCtx->ecx; break;
12940 case 2: u32EffAddr = pCtx->edx; break;
12941 case 3: u32EffAddr = pCtx->ebx; break;
12942 case 4: /* SIB */
12943 {
12944 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12945
12946 /* Get the index and scale it. */
12947 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12948 {
12949 case 0: u32EffAddr = pCtx->eax; break;
12950 case 1: u32EffAddr = pCtx->ecx; break;
12951 case 2: u32EffAddr = pCtx->edx; break;
12952 case 3: u32EffAddr = pCtx->ebx; break;
12953 case 4: u32EffAddr = 0; /*none */ break;
12954 case 5: u32EffAddr = pCtx->ebp; break;
12955 case 6: u32EffAddr = pCtx->esi; break;
12956 case 7: u32EffAddr = pCtx->edi; break;
12957 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12958 }
12959 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12960
12961 /* add base */
12962 switch (bSib & X86_SIB_BASE_MASK)
12963 {
12964 case 0: u32EffAddr += pCtx->eax; break;
12965 case 1: u32EffAddr += pCtx->ecx; break;
12966 case 2: u32EffAddr += pCtx->edx; break;
12967 case 3: u32EffAddr += pCtx->ebx; break;
12968 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12969 case 5:
12970 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12971 {
12972 u32EffAddr += pCtx->ebp;
12973 SET_SS_DEF();
12974 }
12975 else
12976 {
12977 uint32_t u32Disp;
12978 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12979 u32EffAddr += u32Disp;
12980 }
12981 break;
12982 case 6: u32EffAddr += pCtx->esi; break;
12983 case 7: u32EffAddr += pCtx->edi; break;
12984 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12985 }
12986 break;
12987 }
12988 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12989 case 6: u32EffAddr = pCtx->esi; break;
12990 case 7: u32EffAddr = pCtx->edi; break;
12991 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12992 }
12993
12994 /* Get and add the displacement. */
12995 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12996 {
12997 case 0:
12998 break;
12999 case 1:
13000 {
13001 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13002 u32EffAddr += i8Disp;
13003 break;
13004 }
13005 case 2:
13006 {
13007 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13008 u32EffAddr += u32Disp;
13009 break;
13010 }
13011 default:
13012 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13013 }
13014
13015 }
13016 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13017 *pGCPtrEff = u32EffAddr;
13018 else
13019 {
13020 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13021 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13022 }
13023 }
13024 }
13025 else
13026 {
13027 uint64_t u64EffAddr;
13028
13029 /* Handle the rip+disp32 form with no registers first. */
13030 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13031 {
13032 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13033 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13034 }
13035 else
13036 {
13037 /* Get the register (or SIB) value. */
13038 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13039 {
13040 case 0: u64EffAddr = pCtx->rax; break;
13041 case 1: u64EffAddr = pCtx->rcx; break;
13042 case 2: u64EffAddr = pCtx->rdx; break;
13043 case 3: u64EffAddr = pCtx->rbx; break;
13044 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13045 case 6: u64EffAddr = pCtx->rsi; break;
13046 case 7: u64EffAddr = pCtx->rdi; break;
13047 case 8: u64EffAddr = pCtx->r8; break;
13048 case 9: u64EffAddr = pCtx->r9; break;
13049 case 10: u64EffAddr = pCtx->r10; break;
13050 case 11: u64EffAddr = pCtx->r11; break;
13051 case 13: u64EffAddr = pCtx->r13; break;
13052 case 14: u64EffAddr = pCtx->r14; break;
13053 case 15: u64EffAddr = pCtx->r15; break;
13054 /* SIB */
13055 case 4:
13056 case 12:
13057 {
13058 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13059
13060 /* Get the index and scale it. */
13061 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13062 {
13063 case 0: u64EffAddr = pCtx->rax; break;
13064 case 1: u64EffAddr = pCtx->rcx; break;
13065 case 2: u64EffAddr = pCtx->rdx; break;
13066 case 3: u64EffAddr = pCtx->rbx; break;
13067 case 4: u64EffAddr = 0; /*none */ break;
13068 case 5: u64EffAddr = pCtx->rbp; break;
13069 case 6: u64EffAddr = pCtx->rsi; break;
13070 case 7: u64EffAddr = pCtx->rdi; break;
13071 case 8: u64EffAddr = pCtx->r8; break;
13072 case 9: u64EffAddr = pCtx->r9; break;
13073 case 10: u64EffAddr = pCtx->r10; break;
13074 case 11: u64EffAddr = pCtx->r11; break;
13075 case 12: u64EffAddr = pCtx->r12; break;
13076 case 13: u64EffAddr = pCtx->r13; break;
13077 case 14: u64EffAddr = pCtx->r14; break;
13078 case 15: u64EffAddr = pCtx->r15; break;
13079 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13080 }
13081 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13082
13083 /* add base */
13084 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13085 {
13086 case 0: u64EffAddr += pCtx->rax; break;
13087 case 1: u64EffAddr += pCtx->rcx; break;
13088 case 2: u64EffAddr += pCtx->rdx; break;
13089 case 3: u64EffAddr += pCtx->rbx; break;
13090 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13091 case 6: u64EffAddr += pCtx->rsi; break;
13092 case 7: u64EffAddr += pCtx->rdi; break;
13093 case 8: u64EffAddr += pCtx->r8; break;
13094 case 9: u64EffAddr += pCtx->r9; break;
13095 case 10: u64EffAddr += pCtx->r10; break;
13096 case 11: u64EffAddr += pCtx->r11; break;
13097 case 12: u64EffAddr += pCtx->r12; break;
13098 case 14: u64EffAddr += pCtx->r14; break;
13099 case 15: u64EffAddr += pCtx->r15; break;
13100 /* complicated encodings */
13101 case 5:
13102 case 13:
13103 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13104 {
13105 if (!pVCpu->iem.s.uRexB)
13106 {
13107 u64EffAddr += pCtx->rbp;
13108 SET_SS_DEF();
13109 }
13110 else
13111 u64EffAddr += pCtx->r13;
13112 }
13113 else
13114 {
13115 uint32_t u32Disp;
13116 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13117 u64EffAddr += (int32_t)u32Disp;
13118 }
13119 break;
13120 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13121 }
13122 break;
13123 }
13124 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13125 }
13126
13127 /* Get and add the displacement. */
13128 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13129 {
13130 case 0:
13131 break;
13132 case 1:
13133 {
13134 int8_t i8Disp;
13135 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13136 u64EffAddr += i8Disp;
13137 break;
13138 }
13139 case 2:
13140 {
13141 uint32_t u32Disp;
13142 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13143 u64EffAddr += (int32_t)u32Disp;
13144 break;
13145 }
13146 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13147 }
13148
13149 }
13150
13151 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13152 *pGCPtrEff = u64EffAddr;
13153 else
13154 {
13155 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13156 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13157 }
13158 }
13159
13160 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13161 return VINF_SUCCESS;
13162}
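
/*
 * A minimal standalone restatement of the 16-bit addressing rule implemented
 * by the IEMMODE_16BIT branch above, for reference.  The helper name and the
 * plain register parameters are illustrative only; uint16_t arithmetic gives
 * the same wrap-around behaviour as the real code.
 *
 * @code
 *  static uint16_t exampleCalc16BitEffAddr(uint8_t bRm, uint16_t uDisp,
 *                                          uint16_t bx, uint16_t bp,
 *                                          uint16_t si, uint16_t di)
 *  {
 *      if ((bRm & 0xc7) == 0x06)       // mod=00, r/m=110: bare disp16 form
 *          return uDisp;
 *      uint16_t uEff = uDisp;          // disp8 already sign-extended to 16 bits
 *      switch (bRm & 0x07)
 *      {
 *          case 0: uEff += bx + si; break;
 *          case 1: uEff += bx + di; break;
 *          case 2: uEff += bp + si; break;     // SS is the default segment
 *          case 3: uEff += bp + di; break;     // SS is the default segment
 *          case 4: uEff += si;      break;
 *          case 5: uEff += di;      break;
 *          case 6: uEff += bp;      break;     // SS is the default segment
 *          case 7: uEff += bx;      break;
 *      }
 *      return uEff;
 *  }
 * @endcode
 */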
13163
13164
13165/**
13166 * Calculates the effective address of a ModR/M memory operand.
13167 *
13168 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13169 *
13170 * @return Strict VBox status code.
13171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13172 * @param bRm The ModRM byte.
13173 * @param cbImm The size of any immediate following the
13174 * effective address opcode bytes. Important for
13175 * RIP relative addressing.
13176 * @param pGCPtrEff Where to return the effective address.
13177 * @param offRsp RSP displacement.
13178 */
13179IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13180{
13181    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13182 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13183# define SET_SS_DEF() \
13184 do \
13185 { \
13186 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13187 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13188 } while (0)
13189
13190 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13191 {
13192/** @todo Check the effective address size crap! */
13193 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13194 {
13195 uint16_t u16EffAddr;
13196
13197 /* Handle the disp16 form with no registers first. */
13198 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13199 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13200 else
13201 {
13202                /* Get the displacement. */
13203 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13204 {
13205 case 0: u16EffAddr = 0; break;
13206 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13207 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13208 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13209 }
13210
13211 /* Add the base and index registers to the disp. */
13212 switch (bRm & X86_MODRM_RM_MASK)
13213 {
13214 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13215 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13216 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13217 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13218 case 4: u16EffAddr += pCtx->si; break;
13219 case 5: u16EffAddr += pCtx->di; break;
13220 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13221 case 7: u16EffAddr += pCtx->bx; break;
13222 }
13223 }
13224
13225 *pGCPtrEff = u16EffAddr;
13226 }
13227 else
13228 {
13229 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13230 uint32_t u32EffAddr;
13231
13232 /* Handle the disp32 form with no registers first. */
13233 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13234 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13235 else
13236 {
13237 /* Get the register (or SIB) value. */
13238 switch ((bRm & X86_MODRM_RM_MASK))
13239 {
13240 case 0: u32EffAddr = pCtx->eax; break;
13241 case 1: u32EffAddr = pCtx->ecx; break;
13242 case 2: u32EffAddr = pCtx->edx; break;
13243 case 3: u32EffAddr = pCtx->ebx; break;
13244 case 4: /* SIB */
13245 {
13246 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13247
13248 /* Get the index and scale it. */
13249 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13250 {
13251 case 0: u32EffAddr = pCtx->eax; break;
13252 case 1: u32EffAddr = pCtx->ecx; break;
13253 case 2: u32EffAddr = pCtx->edx; break;
13254 case 3: u32EffAddr = pCtx->ebx; break;
13255 case 4: u32EffAddr = 0; /*none */ break;
13256 case 5: u32EffAddr = pCtx->ebp; break;
13257 case 6: u32EffAddr = pCtx->esi; break;
13258 case 7: u32EffAddr = pCtx->edi; break;
13259 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13260 }
13261 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13262
13263 /* add base */
13264 switch (bSib & X86_SIB_BASE_MASK)
13265 {
13266 case 0: u32EffAddr += pCtx->eax; break;
13267 case 1: u32EffAddr += pCtx->ecx; break;
13268 case 2: u32EffAddr += pCtx->edx; break;
13269 case 3: u32EffAddr += pCtx->ebx; break;
13270 case 4:
13271 u32EffAddr += pCtx->esp + offRsp;
13272 SET_SS_DEF();
13273 break;
13274 case 5:
13275 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13276 {
13277 u32EffAddr += pCtx->ebp;
13278 SET_SS_DEF();
13279 }
13280 else
13281 {
13282 uint32_t u32Disp;
13283 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13284 u32EffAddr += u32Disp;
13285 }
13286 break;
13287 case 6: u32EffAddr += pCtx->esi; break;
13288 case 7: u32EffAddr += pCtx->edi; break;
13289 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13290 }
13291 break;
13292 }
13293 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13294 case 6: u32EffAddr = pCtx->esi; break;
13295 case 7: u32EffAddr = pCtx->edi; break;
13296 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13297 }
13298
13299 /* Get and add the displacement. */
13300 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13301 {
13302 case 0:
13303 break;
13304 case 1:
13305 {
13306 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13307 u32EffAddr += i8Disp;
13308 break;
13309 }
13310 case 2:
13311 {
13312 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13313 u32EffAddr += u32Disp;
13314 break;
13315 }
13316 default:
13317 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13318 }
13319
13320 }
13321 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13322 *pGCPtrEff = u32EffAddr;
13323 else
13324 {
13325 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13326 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13327 }
13328 }
13329 }
13330 else
13331 {
13332 uint64_t u64EffAddr;
13333
13334 /* Handle the rip+disp32 form with no registers first. */
13335 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13336 {
13337 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13338 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13339 }
13340 else
13341 {
13342 /* Get the register (or SIB) value. */
13343 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13344 {
13345 case 0: u64EffAddr = pCtx->rax; break;
13346 case 1: u64EffAddr = pCtx->rcx; break;
13347 case 2: u64EffAddr = pCtx->rdx; break;
13348 case 3: u64EffAddr = pCtx->rbx; break;
13349 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13350 case 6: u64EffAddr = pCtx->rsi; break;
13351 case 7: u64EffAddr = pCtx->rdi; break;
13352 case 8: u64EffAddr = pCtx->r8; break;
13353 case 9: u64EffAddr = pCtx->r9; break;
13354 case 10: u64EffAddr = pCtx->r10; break;
13355 case 11: u64EffAddr = pCtx->r11; break;
13356 case 13: u64EffAddr = pCtx->r13; break;
13357 case 14: u64EffAddr = pCtx->r14; break;
13358 case 15: u64EffAddr = pCtx->r15; break;
13359 /* SIB */
13360 case 4:
13361 case 12:
13362 {
13363 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13364
13365 /* Get the index and scale it. */
13366 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13367 {
13368 case 0: u64EffAddr = pCtx->rax; break;
13369 case 1: u64EffAddr = pCtx->rcx; break;
13370 case 2: u64EffAddr = pCtx->rdx; break;
13371 case 3: u64EffAddr = pCtx->rbx; break;
13372 case 4: u64EffAddr = 0; /*none */ break;
13373 case 5: u64EffAddr = pCtx->rbp; break;
13374 case 6: u64EffAddr = pCtx->rsi; break;
13375 case 7: u64EffAddr = pCtx->rdi; break;
13376 case 8: u64EffAddr = pCtx->r8; break;
13377 case 9: u64EffAddr = pCtx->r9; break;
13378 case 10: u64EffAddr = pCtx->r10; break;
13379 case 11: u64EffAddr = pCtx->r11; break;
13380 case 12: u64EffAddr = pCtx->r12; break;
13381 case 13: u64EffAddr = pCtx->r13; break;
13382 case 14: u64EffAddr = pCtx->r14; break;
13383 case 15: u64EffAddr = pCtx->r15; break;
13384 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13385 }
13386 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13387
13388 /* add base */
13389 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13390 {
13391 case 0: u64EffAddr += pCtx->rax; break;
13392 case 1: u64EffAddr += pCtx->rcx; break;
13393 case 2: u64EffAddr += pCtx->rdx; break;
13394 case 3: u64EffAddr += pCtx->rbx; break;
13395 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
13396 case 6: u64EffAddr += pCtx->rsi; break;
13397 case 7: u64EffAddr += pCtx->rdi; break;
13398 case 8: u64EffAddr += pCtx->r8; break;
13399 case 9: u64EffAddr += pCtx->r9; break;
13400 case 10: u64EffAddr += pCtx->r10; break;
13401 case 11: u64EffAddr += pCtx->r11; break;
13402 case 12: u64EffAddr += pCtx->r12; break;
13403 case 14: u64EffAddr += pCtx->r14; break;
13404 case 15: u64EffAddr += pCtx->r15; break;
13405 /* complicated encodings */
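                            /* Note: base encodings 5/13 mean no base register when mod=0 (disp32 only);
                               otherwise rBP (defaulting to the SS segment) or r13 is used as the base. */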
13406 case 5:
13407 case 13:
13408 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13409 {
13410 if (!pVCpu->iem.s.uRexB)
13411 {
13412 u64EffAddr += pCtx->rbp;
13413 SET_SS_DEF();
13414 }
13415 else
13416 u64EffAddr += pCtx->r13;
13417 }
13418 else
13419 {
13420 uint32_t u32Disp;
13421 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13422 u64EffAddr += (int32_t)u32Disp;
13423 }
13424 break;
13425 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13426 }
13427 break;
13428 }
13429 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13430 }
13431
13432 /* Get and add the displacement. */
13433 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13434 {
13435 case 0:
13436 break;
13437 case 1:
13438 {
13439 int8_t i8Disp;
13440 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13441 u64EffAddr += i8Disp;
13442 break;
13443 }
13444 case 2:
13445 {
13446 uint32_t u32Disp;
13447 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13448 u64EffAddr += (int32_t)u32Disp;
13449 break;
13450 }
13451 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13452 }
13453
13454 }
13455
13456 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13457 *pGCPtrEff = u64EffAddr;
13458 else
13459 {
13460 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13461 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13462 }
13463 }
13464
13465 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13466 return VINF_SUCCESS;
13467}
13468
13469
13470#ifdef IEM_WITH_SETJMP
13471/**
13472 * Calculates the effective address of a ModR/M memory operand.
13473 *
13474 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13475 *
13476 * May longjmp on internal error.
13477 *
13478 * @return The effective address.
13479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13480 * @param bRm The ModRM byte.
13481 * @param cbImm The size of any immediate following the
13482 * effective address opcode bytes. Important for
13483 * RIP relative addressing.
13484 */
13485IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13486{
13487 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13488 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
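    /* BP/SP based addressing forms default to the SS segment unless an explicit segment
       prefix was given; the SET_SS_DEF helper below applies that default. */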
13489# define SET_SS_DEF() \
13490 do \
13491 { \
13492 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13493 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13494 } while (0)
13495
13496 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13497 {
13498/** @todo Check the effective address size crap! */
13499 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13500 {
13501 uint16_t u16EffAddr;
13502
13503 /* Handle the disp16 form with no registers first. */
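            /* (mod=00b with r/m=110b encodes a bare 16-bit displacement, no base or index.) */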
13504 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13505 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13506 else
13507 {
13508 /* Get the displacement. */
13509 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13510 {
13511 case 0: u16EffAddr = 0; break;
13512 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13513 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13514 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13515 }
13516
13517 /* Add the base and index registers to the disp. */
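                /* The eight 16-bit forms are the classic BX+SI, BX+DI, BP+SI, BP+DI, SI, DI, BP and BX
                   combinations; the BP-based ones default to the SS segment via SET_SS_DEF. */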
13518 switch (bRm & X86_MODRM_RM_MASK)
13519 {
13520 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13521 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13522 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13523 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13524 case 4: u16EffAddr += pCtx->si; break;
13525 case 5: u16EffAddr += pCtx->di; break;
13526 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13527 case 7: u16EffAddr += pCtx->bx; break;
13528 }
13529 }
13530
13531 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13532 return u16EffAddr;
13533 }
13534
13535 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13536 uint32_t u32EffAddr;
13537
13538 /* Handle the disp32 form with no registers first. */
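        /* (mod=00b with r/m=101b encodes a bare 32-bit displacement, no base or index.) */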
13539 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13540 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13541 else
13542 {
13543 /* Get the register (or SIB) value. */
13544 switch ((bRm & X86_MODRM_RM_MASK))
13545 {
13546 case 0: u32EffAddr = pCtx->eax; break;
13547 case 1: u32EffAddr = pCtx->ecx; break;
13548 case 2: u32EffAddr = pCtx->edx; break;
13549 case 3: u32EffAddr = pCtx->ebx; break;
13550 case 4: /* SIB */
13551 {
13552 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13553
13554 /* Get the index and scale it. */
13555 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13556 {
13557 case 0: u32EffAddr = pCtx->eax; break;
13558 case 1: u32EffAddr = pCtx->ecx; break;
13559 case 2: u32EffAddr = pCtx->edx; break;
13560 case 3: u32EffAddr = pCtx->ebx; break;
13561 case 4: u32EffAddr = 0; /*none */ break;
13562 case 5: u32EffAddr = pCtx->ebp; break;
13563 case 6: u32EffAddr = pCtx->esi; break;
13564 case 7: u32EffAddr = pCtx->edi; break;
13565 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13566 }
13567 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13568
13569 /* add base */
13570 switch (bSib & X86_SIB_BASE_MASK)
13571 {
13572 case 0: u32EffAddr += pCtx->eax; break;
13573 case 1: u32EffAddr += pCtx->ecx; break;
13574 case 2: u32EffAddr += pCtx->edx; break;
13575 case 3: u32EffAddr += pCtx->ebx; break;
13576 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13577 case 5:
13578 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13579 {
13580 u32EffAddr += pCtx->ebp;
13581 SET_SS_DEF();
13582 }
13583 else
13584 {
13585 uint32_t u32Disp;
13586 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13587 u32EffAddr += u32Disp;
13588 }
13589 break;
13590 case 6: u32EffAddr += pCtx->esi; break;
13591 case 7: u32EffAddr += pCtx->edi; break;
13592 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13593 }
13594 break;
13595 }
13596 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13597 case 6: u32EffAddr = pCtx->esi; break;
13598 case 7: u32EffAddr = pCtx->edi; break;
13599 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13600 }
13601
13602 /* Get and add the displacement. */
13603 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13604 {
13605 case 0:
13606 break;
13607 case 1:
13608 {
13609 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13610 u32EffAddr += i8Disp;
13611 break;
13612 }
13613 case 2:
13614 {
13615 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13616 u32EffAddr += u32Disp;
13617 break;
13618 }
13619 default:
13620 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13621 }
13622 }
13623
13624 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13625 {
13626 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13627 return u32EffAddr;
13628 }
13629 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13630 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13631 return u32EffAddr & UINT16_MAX;
13632 }
13633
13634 uint64_t u64EffAddr;
13635
13636 /* Handle the rip+disp32 form with no registers first. */
13637 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13638 {
13639 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13640 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13641 }
13642 else
13643 {
13644 /* Get the register (or SIB) value. */
13645 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13646 {
13647 case 0: u64EffAddr = pCtx->rax; break;
13648 case 1: u64EffAddr = pCtx->rcx; break;
13649 case 2: u64EffAddr = pCtx->rdx; break;
13650 case 3: u64EffAddr = pCtx->rbx; break;
13651 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13652 case 6: u64EffAddr = pCtx->rsi; break;
13653 case 7: u64EffAddr = pCtx->rdi; break;
13654 case 8: u64EffAddr = pCtx->r8; break;
13655 case 9: u64EffAddr = pCtx->r9; break;
13656 case 10: u64EffAddr = pCtx->r10; break;
13657 case 11: u64EffAddr = pCtx->r11; break;
13658 case 13: u64EffAddr = pCtx->r13; break;
13659 case 14: u64EffAddr = pCtx->r14; break;
13660 case 15: u64EffAddr = pCtx->r15; break;
13661 /* SIB */
13662 case 4:
13663 case 12:
13664 {
13665 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13666
13667 /* Get the index and scale it. */
13668 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13669 {
13670 case 0: u64EffAddr = pCtx->rax; break;
13671 case 1: u64EffAddr = pCtx->rcx; break;
13672 case 2: u64EffAddr = pCtx->rdx; break;
13673 case 3: u64EffAddr = pCtx->rbx; break;
13674 case 4: u64EffAddr = 0; /*none */ break;
13675 case 5: u64EffAddr = pCtx->rbp; break;
13676 case 6: u64EffAddr = pCtx->rsi; break;
13677 case 7: u64EffAddr = pCtx->rdi; break;
13678 case 8: u64EffAddr = pCtx->r8; break;
13679 case 9: u64EffAddr = pCtx->r9; break;
13680 case 10: u64EffAddr = pCtx->r10; break;
13681 case 11: u64EffAddr = pCtx->r11; break;
13682 case 12: u64EffAddr = pCtx->r12; break;
13683 case 13: u64EffAddr = pCtx->r13; break;
13684 case 14: u64EffAddr = pCtx->r14; break;
13685 case 15: u64EffAddr = pCtx->r15; break;
13686 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13687 }
13688 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13689
13690 /* add base */
13691 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13692 {
13693 case 0: u64EffAddr += pCtx->rax; break;
13694 case 1: u64EffAddr += pCtx->rcx; break;
13695 case 2: u64EffAddr += pCtx->rdx; break;
13696 case 3: u64EffAddr += pCtx->rbx; break;
13697 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13698 case 6: u64EffAddr += pCtx->rsi; break;
13699 case 7: u64EffAddr += pCtx->rdi; break;
13700 case 8: u64EffAddr += pCtx->r8; break;
13701 case 9: u64EffAddr += pCtx->r9; break;
13702 case 10: u64EffAddr += pCtx->r10; break;
13703 case 11: u64EffAddr += pCtx->r11; break;
13704 case 12: u64EffAddr += pCtx->r12; break;
13705 case 14: u64EffAddr += pCtx->r14; break;
13706 case 15: u64EffAddr += pCtx->r15; break;
13707 /* complicated encodings */
13708 case 5:
13709 case 13:
13710 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13711 {
13712 if (!pVCpu->iem.s.uRexB)
13713 {
13714 u64EffAddr += pCtx->rbp;
13715 SET_SS_DEF();
13716 }
13717 else
13718 u64EffAddr += pCtx->r13;
13719 }
13720 else
13721 {
13722 uint32_t u32Disp;
13723 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13724 u64EffAddr += (int32_t)u32Disp;
13725 }
13726 break;
13727 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13728 }
13729 break;
13730 }
13731 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13732 }
13733
13734 /* Get and add the displacement. */
13735 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13736 {
13737 case 0:
13738 break;
13739 case 1:
13740 {
13741 int8_t i8Disp;
13742 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13743 u64EffAddr += i8Disp;
13744 break;
13745 }
13746 case 2:
13747 {
13748 uint32_t u32Disp;
13749 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13750 u64EffAddr += (int32_t)u32Disp;
13751 break;
13752 }
13753 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13754 }
13755
13756 }
13757
13758 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13759 {
13760 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13761 return u64EffAddr;
13762 }
13763 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13764 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13765 return u64EffAddr & UINT32_MAX;
13766}
13767#endif /* IEM_WITH_SETJMP */
13768
13769
13770/** @} */
13771
13772
13773
13774/*
13775 * Include the instructions
13776 */
13777#include "IEMAllInstructions.cpp.h"
13778
13779
13780
13781
13782#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13783
13784/**
13785 * Sets up execution verification mode.
13786 */
13787IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13788{
13790 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13791
13792 /*
13793 * Always note down the address of the current instruction.
13794 */
13795 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13796 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13797
13798 /*
13799 * Enable verification and/or logging.
13800 */
13801 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
13802 if ( fNewNoRem
13803 && ( 0
13804#if 0 /* auto enable on first paged protected mode interrupt */
13805 || ( pOrgCtx->eflags.Bits.u1IF
13806 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13807 && TRPMHasTrap(pVCpu)
13808 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13809#endif
13810#if 0
13811 || ( pOrgCtx->cs.Sel == 0x10
13812 && ( pOrgCtx->rip == 0x90119e3e
13813 || pOrgCtx->rip == 0x901d9810))
13814#endif
13815#if 0 /* Auto enable DSL - FPU stuff. */
13816 || ( pOrgCtx->cs.Sel == 0x10
13817 && (// pOrgCtx->rip == 0xc02ec07f
13818 //|| pOrgCtx->rip == 0xc02ec082
13819 //|| pOrgCtx->rip == 0xc02ec0c9
13820 0
13821 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13822#endif
13823#if 0 /* Auto enable DSL - fstp st0 stuff. */
13824 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13825#endif
13826#if 0
13827 || pOrgCtx->rip == 0x9022bb3a
13828#endif
13829#if 0
13830 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13831#endif
13832#if 0
13833 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13834 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13835#endif
13836#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
13837 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13838 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13839 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13840#endif
13841#if 0 /* NT4SP1 - xadd early boot. */
13842 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13843#endif
13844#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13845 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13846#endif
13847#if 0 /* NT4SP1 - cmpxchg (AMD). */
13848 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13849#endif
13850#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13851 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13852#endif
13853#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13854 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13855
13856#endif
13857#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13858 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13859
13860#endif
13861#if 0 /* NT4SP1 - frstor [ecx] */
13862 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13863#endif
13864#if 0 /* xxxxxx - All long mode code. */
13865 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13866#endif
13867#if 0 /* rep movsq linux 3.7 64-bit boot. */
13868 || (pOrgCtx->rip == 0x0000000000100241)
13869#endif
13870#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13871 || (pOrgCtx->rip == 0x000000000215e240)
13872#endif
13873#if 0 /* DOS's size-overridden iret to v8086. */
13874 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13875#endif
13876 )
13877 )
13878 {
13879 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13880 RTLogFlags(NULL, "enabled");
13881 fNewNoRem = false;
13882 }
13883 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13884 {
13885 pVCpu->iem.s.fNoRem = fNewNoRem;
13886 if (!fNewNoRem)
13887 {
13888 LogAlways(("Enabling verification mode!\n"));
13889 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13890 }
13891 else
13892 LogAlways(("Disabling verification mode!\n"));
13893 }
13894
13895 /*
13896 * Switch state.
13897 */
13898 if (IEM_VERIFICATION_ENABLED(pVCpu))
13899 {
13900 static CPUMCTX s_DebugCtx; /* Ugly! */
13901
13902 s_DebugCtx = *pOrgCtx;
13903 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13904 }
13905
13906 /*
13907 * See if there is an interrupt pending in TRPM and inject it if we can.
13908 */
13909 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13910 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
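    /* With nested SVM hw.virt active, interrupt delivery is first gated by the global interrupt
       flag (GIF); inside a nested-guest it is presumably further subject to the SVM physical
       interrupt masking checked by CPUMCanSvmNstGstTakePhysIntr, otherwise plain EFLAGS.IF decides. */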
13911#if defined(VBOX_WITH_NESTED_HWVIRT)
13912 bool fIntrEnabled = pOrgCtx->hwvirt.Gif;
13913 if (fIntrEnabled)
13914 {
13915 if (CPUMIsGuestInSvmNestedHwVirtMode(pOrgCtx))
13916 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pOrgCtx);
13917 else
13918 fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13919 }
13920#else
13921 bool fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13922#endif
13923 if ( fIntrEnabled
13924 && TRPMHasTrap(pVCpu)
13925 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13926 {
13927 uint8_t u8TrapNo;
13928 TRPMEVENT enmType;
13929 RTGCUINT uErrCode;
13930 RTGCPTR uCr2;
13931 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13932 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13933 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13934 TRPMResetTrap(pVCpu);
13935 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13936 }
13937
13938 /*
13939 * Reset the counters.
13940 */
13941 pVCpu->iem.s.cIOReads = 0;
13942 pVCpu->iem.s.cIOWrites = 0;
13943 pVCpu->iem.s.fIgnoreRaxRdx = false;
13944 pVCpu->iem.s.fOverlappingMovs = false;
13945 pVCpu->iem.s.fProblematicMemory = false;
13946 pVCpu->iem.s.fUndefinedEFlags = 0;
13947
13948 if (IEM_VERIFICATION_ENABLED(pVCpu))
13949 {
13950 /*
13951 * Free all verification records.
13952 */
13953 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13954 pVCpu->iem.s.pIemEvtRecHead = NULL;
13955 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
13956 do
13957 {
13958 while (pEvtRec)
13959 {
13960 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13961 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13962 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13963 pEvtRec = pNext;
13964 }
13965 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13966 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13967 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13968 } while (pEvtRec);
13969 }
13970}
13971
13972
13973/**
13974 * Allocates an event record.
13975 * @returns Pointer to the record, or NULL if verification is disabled or allocation fails.
13976 */
13977IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
13978{
13979 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13980 return NULL;
13981
13982 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
13983 if (pEvtRec)
13984 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
13985 else
13986 {
13987 if (!pVCpu->iem.s.ppIemEvtRecNext)
13988 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
13989
13990 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
13991 if (!pEvtRec)
13992 return NULL;
13993 }
13994 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
13995 pEvtRec->pNext = NULL;
13996 return pEvtRec;
13997}
13998
13999
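/*
 * The IEMNotify* callbacks below record I/O port and MMIO accesses performed by the other
 * execution engine (REM or HM) on the "other" event list, so that iemExecVerificationModeCheck()
 * can later compare them 1:1 against IEM's own records.
 */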
14000/**
14001 * IOMMMIORead notification.
14002 */
14003VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
14004{
14005 PVMCPU pVCpu = VMMGetCpu(pVM);
14006 if (!pVCpu)
14007 return;
14008 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14009 if (!pEvtRec)
14010 return;
14011 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
14012 pEvtRec->u.RamRead.GCPhys = GCPhys;
14013 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
14014 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14015 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14016}
14017
14018
14019/**
14020 * IOMMMIOWrite notification.
14021 */
14022VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
14023{
14024 PVMCPU pVCpu = VMMGetCpu(pVM);
14025 if (!pVCpu)
14026 return;
14027 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14028 if (!pEvtRec)
14029 return;
14030 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
14031 pEvtRec->u.RamWrite.GCPhys = GCPhys;
14032 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
14033 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
14034 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
14035 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
14036 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
14037 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14038 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14039}
14040
14041
14042/**
14043 * IOMIOPortRead notification.
14044 */
14045VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
14046{
14047 PVMCPU pVCpu = VMMGetCpu(pVM);
14048 if (!pVCpu)
14049 return;
14050 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14051 if (!pEvtRec)
14052 return;
14053 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14054 pEvtRec->u.IOPortRead.Port = Port;
14055 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14056 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14057 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14058}
14059
14060/**
14061 * IOMIOPortWrite notification.
14062 */
14063VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14064{
14065 PVMCPU pVCpu = VMMGetCpu(pVM);
14066 if (!pVCpu)
14067 return;
14068 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14069 if (!pEvtRec)
14070 return;
14071 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14072 pEvtRec->u.IOPortWrite.Port = Port;
14073 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14074 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14075 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14076 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14077}
14078
14079
14080VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
14081{
14082 PVMCPU pVCpu = VMMGetCpu(pVM);
14083 if (!pVCpu)
14084 return;
14085 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14086 if (!pEvtRec)
14087 return;
14088 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
14089 pEvtRec->u.IOPortStrRead.Port = Port;
14090 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
14091 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
14092 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14093 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14094}
14095
14096
14097VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
14098{
14099 PVMCPU pVCpu = VMMGetCpu(pVM);
14100 if (!pVCpu)
14101 return;
14102 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14103 if (!pEvtRec)
14104 return;
14105 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
14106 pEvtRec->u.IOPortStrWrite.Port = Port;
14107 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
14108 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
14109 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14110 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14111}
14112
14113
14114/**
14115 * Fakes and records an I/O port read.
14116 *
14117 * @returns VINF_SUCCESS.
14118 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14119 * @param Port The I/O port.
14120 * @param pu32Value Where to store the fake value.
14121 * @param cbValue The size of the access.
14122 */
14123IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14124{
14125 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14126 if (pEvtRec)
14127 {
14128 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14129 pEvtRec->u.IOPortRead.Port = Port;
14130 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14131 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14132 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14133 }
14134 pVCpu->iem.s.cIOReads++;
14135 *pu32Value = 0xcccccccc;
14136 return VINF_SUCCESS;
14137}
14138
14139
14140/**
14141 * Fakes and records an I/O port write.
14142 *
14143 * @returns VINF_SUCCESS.
14144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14145 * @param Port The I/O port.
14146 * @param u32Value The value being written.
14147 * @param cbValue The size of the access.
14148 */
14149IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14150{
14151 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14152 if (pEvtRec)
14153 {
14154 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14155 pEvtRec->u.IOPortWrite.Port = Port;
14156 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14157 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14158 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14159 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14160 }
14161 pVCpu->iem.s.cIOWrites++;
14162 return VINF_SUCCESS;
14163}
14164
14165
14166/**
14167 * Used to add extra register and instruction details to an assertion message.
14168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14169 */
14170IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
14171{
14172 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14173 PVM pVM = pVCpu->CTX_SUFF(pVM);
14175 char szRegs[4096];
14176 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
14177 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
14178 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
14179 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
14180 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
14181 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
14182 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
14183 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
14184 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
14185 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
14186 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
14187 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
14188 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
14189 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
14190 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
14191 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
14192 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
14193 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
14194 " efer=%016VR{efer}\n"
14195 " pat=%016VR{pat}\n"
14196 " sf_mask=%016VR{sf_mask}\n"
14197 "krnl_gs_base=%016VR{krnl_gs_base}\n"
14198 " lstar=%016VR{lstar}\n"
14199 " star=%016VR{star} cstar=%016VR{cstar}\n"
14200 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
14201 );
14202
14203 char szInstr1[256];
14204 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
14205 DBGF_DISAS_FLAGS_DEFAULT_MODE,
14206 szInstr1, sizeof(szInstr1), NULL);
14207 char szInstr2[256];
14208 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
14209 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14210 szInstr2, sizeof(szInstr2), NULL);
14211
14212 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
14213}
14214
14215
14216/**
14217 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
14218 * dump to the assertion info.
14219 *
14220 * @param pEvtRec The record to dump.
14221 */
14222IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
14223{
14224 switch (pEvtRec->enmEvent)
14225 {
14226 case IEMVERIFYEVENT_IOPORT_READ:
14227 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
14228 pEvtRec->u.IOPortRead.Port,
14229 pEvtRec->u.IOPortRead.cbValue);
14230 break;
14231 case IEMVERIFYEVENT_IOPORT_WRITE:
14232 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
14233 pEvtRec->u.IOPortWrite.Port,
14234 pEvtRec->u.IOPortWrite.cbValue,
14235 pEvtRec->u.IOPortWrite.u32Value);
14236 break;
14237 case IEMVERIFYEVENT_IOPORT_STR_READ:
14238 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
14239 pEvtRec->u.IOPortStrRead.Port,
14240 pEvtRec->u.IOPortStrRead.cbValue,
14241 pEvtRec->u.IOPortStrRead.cTransfers);
14242 break;
14243 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14244 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
14245 pEvtRec->u.IOPortStrWrite.Port,
14246 pEvtRec->u.IOPortStrWrite.cbValue,
14247 pEvtRec->u.IOPortStrWrite.cTransfers);
14248 break;
14249 case IEMVERIFYEVENT_RAM_READ:
14250 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
14251 pEvtRec->u.RamRead.GCPhys,
14252 pEvtRec->u.RamRead.cb);
14253 break;
14254 case IEMVERIFYEVENT_RAM_WRITE:
14255 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
14256 pEvtRec->u.RamWrite.GCPhys,
14257 pEvtRec->u.RamWrite.cb,
14258 (int)pEvtRec->u.RamWrite.cb,
14259 pEvtRec->u.RamWrite.ab);
14260 break;
14261 default:
14262 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
14263 break;
14264 }
14265}
14266
14267
14268/**
14269 * Raises an assertion on the specified records, showing the given message with
14270 * a record dump attached.
14271 *
14272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14273 * @param pEvtRec1 The first record.
14274 * @param pEvtRec2 The second record.
14275 * @param pszMsg The message explaining why we're asserting.
14276 */
14277IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
14278{
14279 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14280 iemVerifyAssertAddRecordDump(pEvtRec1);
14281 iemVerifyAssertAddRecordDump(pEvtRec2);
14282 iemVerifyAssertMsg2(pVCpu);
14283 RTAssertPanic();
14284}
14285
14286
14287/**
14288 * Raises an assertion on the specified record, showing the given message with
14289 * a record dump attached.
14290 *
14291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14292 * @param pEvtRec1 The first record.
14293 * @param pszMsg The message explaining why we're asserting.
14294 */
14295IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
14296{
14297 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14298 iemVerifyAssertAddRecordDump(pEvtRec);
14299 iemVerifyAssertMsg2(pVCpu);
14300 RTAssertPanic();
14301}
14302
14303
14304/**
14305 * Verifies a write record.
14306 *
14307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14308 * @param pEvtRec The write record.
14309 * @param fRem Set if REM was doing the other execution. If clear,
14310 * it was HM.
14311 */
14312IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
14313{
14314 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
14315 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
14316 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
14317 if ( RT_FAILURE(rc)
14318 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
14319 {
14320 /* fend off ins */
14321 if ( !pVCpu->iem.s.cIOReads
14322 || pEvtRec->u.RamWrite.ab[0] != 0xcc
14323 || ( pEvtRec->u.RamWrite.cb != 1
14324 && pEvtRec->u.RamWrite.cb != 2
14325 && pEvtRec->u.RamWrite.cb != 4) )
14326 {
14327 /* fend off ROMs and MMIO */
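                /* (The two excluded ranges below are presumably the legacy VGA/BIOS area at
                   0xa0000-0xfffff and the flash/BIOS mapping just below 4 GB.) */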
14328 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
14329 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
14330 {
14331 /* fend off fxsave */
14332 if (pEvtRec->u.RamWrite.cb != 512)
14333 {
14334 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
14335 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14336 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
14337 RTAssertMsg2Add("%s: %.*Rhxs\n"
14338 "iem: %.*Rhxs\n",
14339 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
14340 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
14341 iemVerifyAssertAddRecordDump(pEvtRec);
14342 iemVerifyAssertMsg2(pVCpu);
14343 RTAssertPanic();
14344 }
14345 }
14346 }
14347 }
14348
14349}
14350
14351/**
14352 * Performs the post-execution verification checks.
14353 */
14354IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
14355{
14356 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14357 return rcStrictIem;
14358
14359 /*
14360 * Switch back the state.
14361 */
14362 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
14363 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
14364 Assert(pOrgCtx != pDebugCtx);
14365 IEM_GET_CTX(pVCpu) = pOrgCtx;
14366
14367 /*
14368 * Execute the instruction in REM.
14369 */
14370 bool fRem = false;
14371 PVM pVM = pVCpu->CTX_SUFF(pVM);
14373 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
14374#ifdef IEM_VERIFICATION_MODE_FULL_HM
14375 if ( HMIsEnabled(pVM)
14376 && pVCpu->iem.s.cIOReads == 0
14377 && pVCpu->iem.s.cIOWrites == 0
14378 && !pVCpu->iem.s.fProblematicMemory)
14379 {
14380 uint64_t uStartRip = pOrgCtx->rip;
14381 unsigned iLoops = 0;
14382 do
14383 {
14384 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
14385 iLoops++;
14386 } while ( rc == VINF_SUCCESS
14387 || ( rc == VINF_EM_DBG_STEPPED
14388 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14389 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
14390 || ( pOrgCtx->rip != pDebugCtx->rip
14391 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
14392 && iLoops < 8) );
14393 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
14394 rc = VINF_SUCCESS;
14395 }
14396#endif
14397 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
14398 || rc == VINF_IOM_R3_IOPORT_READ
14399 || rc == VINF_IOM_R3_IOPORT_WRITE
14400 || rc == VINF_IOM_R3_MMIO_READ
14401 || rc == VINF_IOM_R3_MMIO_READ_WRITE
14402 || rc == VINF_IOM_R3_MMIO_WRITE
14403 || rc == VINF_CPUM_R3_MSR_READ
14404 || rc == VINF_CPUM_R3_MSR_WRITE
14405 || rc == VINF_EM_RESCHEDULE
14406 )
14407 {
14408 EMRemLock(pVM);
14409 rc = REMR3EmulateInstruction(pVM, pVCpu);
14410 AssertRC(rc);
14411 EMRemUnlock(pVM);
14412 fRem = true;
14413 }
14414
14415# if 1 /* Skip unimplemented instructions for now. */
14416 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14417 {
14418 IEM_GET_CTX(pVCpu) = pOrgCtx;
14419 if (rc == VINF_EM_DBG_STEPPED)
14420 return VINF_SUCCESS;
14421 return rc;
14422 }
14423# endif
14424
14425 /*
14426 * Compare the register states.
14427 */
14428 unsigned cDiffs = 0;
14429 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
14430 {
14431 //Log(("REM and IEM ends up with different registers!\n"));
14432 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
14433
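        /* The CHECK_* macros below compare a field in the other engine's context (pOrgCtx/pOrgXState)
           against IEM's debug context (pDebugCtx/pDebugXState), print a size-appropriate diff message
           and bump cDiffs on mismatch. */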
14434# define CHECK_FIELD(a_Field) \
14435 do \
14436 { \
14437 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14438 { \
14439 switch (sizeof(pOrgCtx->a_Field)) \
14440 { \
14441 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14442 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14443 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14444 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14445 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14446 } \
14447 cDiffs++; \
14448 } \
14449 } while (0)
14450# define CHECK_XSTATE_FIELD(a_Field) \
14451 do \
14452 { \
14453 if (pOrgXState->a_Field != pDebugXState->a_Field) \
14454 { \
14455 switch (sizeof(pOrgXState->a_Field)) \
14456 { \
14457 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14458 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14459 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14460 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14461 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14462 } \
14463 cDiffs++; \
14464 } \
14465 } while (0)
14466
14467# define CHECK_BIT_FIELD(a_Field) \
14468 do \
14469 { \
14470 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14471 { \
14472 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
14473 cDiffs++; \
14474 } \
14475 } while (0)
14476
14477# define CHECK_SEL(a_Sel) \
14478 do \
14479 { \
14480 CHECK_FIELD(a_Sel.Sel); \
14481 CHECK_FIELD(a_Sel.Attr.u); \
14482 CHECK_FIELD(a_Sel.u64Base); \
14483 CHECK_FIELD(a_Sel.u32Limit); \
14484 CHECK_FIELD(a_Sel.fFlags); \
14485 } while (0)
14486
14487 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
14488 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
14489
14490#if 1 /* The recompiler doesn't update these the intel way. */
14491 if (fRem)
14492 {
14493 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
14494 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
14495 pOrgXState->x87.CS = pDebugXState->x87.CS;
14496 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
14497 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
14498 pOrgXState->x87.DS = pDebugXState->x87.DS;
14499 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
14500 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
14501 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
14502 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
14503 }
14504#endif
14505 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
14506 {
14507 RTAssertMsg2Weak(" the FPU state differs\n");
14508 cDiffs++;
14509 CHECK_XSTATE_FIELD(x87.FCW);
14510 CHECK_XSTATE_FIELD(x87.FSW);
14511 CHECK_XSTATE_FIELD(x87.FTW);
14512 CHECK_XSTATE_FIELD(x87.FOP);
14513 CHECK_XSTATE_FIELD(x87.FPUIP);
14514 CHECK_XSTATE_FIELD(x87.CS);
14515 CHECK_XSTATE_FIELD(x87.Rsrvd1);
14516 CHECK_XSTATE_FIELD(x87.FPUDP);
14517 CHECK_XSTATE_FIELD(x87.DS);
14518 CHECK_XSTATE_FIELD(x87.Rsrvd2);
14519 CHECK_XSTATE_FIELD(x87.MXCSR);
14520 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
14521 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
14522 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
14523 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
14524 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
14525 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
14526 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
14527 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
14528 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
14529 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
14530 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
14531 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
14532 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
14533 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
14534 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
14535 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
14536 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
14537 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
14538 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
14539 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
14540 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
14541 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
14542 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
14543 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
14544 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
14545 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
14546 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
14547 }
14548 CHECK_FIELD(rip);
14549 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
14550 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
14551 {
14552 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
14553 CHECK_BIT_FIELD(rflags.Bits.u1CF);
14554 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
14555 CHECK_BIT_FIELD(rflags.Bits.u1PF);
14556 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
14557 CHECK_BIT_FIELD(rflags.Bits.u1AF);
14558 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
14559 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
14560 CHECK_BIT_FIELD(rflags.Bits.u1SF);
14561 CHECK_BIT_FIELD(rflags.Bits.u1TF);
14562 CHECK_BIT_FIELD(rflags.Bits.u1IF);
14563 CHECK_BIT_FIELD(rflags.Bits.u1DF);
14564 CHECK_BIT_FIELD(rflags.Bits.u1OF);
14565 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
14566 CHECK_BIT_FIELD(rflags.Bits.u1NT);
14567 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
14568 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
14569 CHECK_BIT_FIELD(rflags.Bits.u1RF);
14570 CHECK_BIT_FIELD(rflags.Bits.u1VM);
14571 CHECK_BIT_FIELD(rflags.Bits.u1AC);
14572 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
14573 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
14574 CHECK_BIT_FIELD(rflags.Bits.u1ID);
14575 }
14576
14577 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
14578 CHECK_FIELD(rax);
14579 CHECK_FIELD(rcx);
14580 if (!pVCpu->iem.s.fIgnoreRaxRdx)
14581 CHECK_FIELD(rdx);
14582 CHECK_FIELD(rbx);
14583 CHECK_FIELD(rsp);
14584 CHECK_FIELD(rbp);
14585 CHECK_FIELD(rsi);
14586 CHECK_FIELD(rdi);
14587 CHECK_FIELD(r8);
14588 CHECK_FIELD(r9);
14589 CHECK_FIELD(r10);
14590 CHECK_FIELD(r11);
14591 CHECK_FIELD(r12);
14592 CHECK_FIELD(r13);
14593 CHECK_SEL(cs);
14594 CHECK_SEL(ss);
14595 CHECK_SEL(ds);
14596 CHECK_SEL(es);
14597 CHECK_SEL(fs);
14598 CHECK_SEL(gs);
14599 CHECK_FIELD(cr0);
14600
14601 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
14602 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
14603 /* Kludge #2: CR2 differs slightly on cross page boundary faults, we report the last address of the access
14604 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
14605 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14606 {
14607 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14608 { /* ignore */ }
14609 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14610 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14611 && fRem)
14612 { /* ignore */ }
14613 else
14614 CHECK_FIELD(cr2);
14615 }
14616 CHECK_FIELD(cr3);
14617 CHECK_FIELD(cr4);
14618 CHECK_FIELD(dr[0]);
14619 CHECK_FIELD(dr[1]);
14620 CHECK_FIELD(dr[2]);
14621 CHECK_FIELD(dr[3]);
14622 CHECK_FIELD(dr[6]);
14623 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14624 CHECK_FIELD(dr[7]);
14625 CHECK_FIELD(gdtr.cbGdt);
14626 CHECK_FIELD(gdtr.pGdt);
14627 CHECK_FIELD(idtr.cbIdt);
14628 CHECK_FIELD(idtr.pIdt);
14629 CHECK_SEL(ldtr);
14630 CHECK_SEL(tr);
14631 CHECK_FIELD(SysEnter.cs);
14632 CHECK_FIELD(SysEnter.eip);
14633 CHECK_FIELD(SysEnter.esp);
14634 CHECK_FIELD(msrEFER);
14635 CHECK_FIELD(msrSTAR);
14636 CHECK_FIELD(msrPAT);
14637 CHECK_FIELD(msrLSTAR);
14638 CHECK_FIELD(msrCSTAR);
14639 CHECK_FIELD(msrSFMASK);
14640 CHECK_FIELD(msrKERNELGSBASE);
14641
14642 if (cDiffs != 0)
14643 {
14644 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14645 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14646 RTAssertPanic();
14647 static bool volatile s_fEnterDebugger = true;
14648 if (s_fEnterDebugger)
14649 DBGFSTOP(pVM);
14650
14651# if 1 /* Ignore unimplemented instructions for now. */
14652 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14653 rcStrictIem = VINF_SUCCESS;
14654# endif
14655 }
14656# undef CHECK_FIELD
14657# undef CHECK_BIT_FIELD
14658 }
14659
14660 /*
14661 * If the register state compared fine, check the verification event
14662 * records.
14663 */
14664 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14665 {
14666 /*
14667 * Compare verification event records.
14668 * - I/O port accesses should be a 1:1 match.
14669 */
14670 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14671 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14672 while (pIemRec && pOtherRec)
14673 {
14674 /* Since we might miss RAM writes and reads, ignore extra IEM reads and verify
14675 that any extra IEM writes match what is actually in guest memory. */
14676 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14677 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14678 && pIemRec->pNext)
14679 {
14680 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14681 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14682 pIemRec = pIemRec->pNext;
14683 }
14684
14685 /* Do the compare. */
14686 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14687 {
14688 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14689 break;
14690 }
14691 bool fEquals;
14692 switch (pIemRec->enmEvent)
14693 {
14694 case IEMVERIFYEVENT_IOPORT_READ:
14695 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14696 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14697 break;
14698 case IEMVERIFYEVENT_IOPORT_WRITE:
14699 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14700 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14701 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14702 break;
14703 case IEMVERIFYEVENT_IOPORT_STR_READ:
14704 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14705 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14706 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14707 break;
14708 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14709 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14710 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14711 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14712 break;
14713 case IEMVERIFYEVENT_RAM_READ:
14714 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14715 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14716 break;
14717 case IEMVERIFYEVENT_RAM_WRITE:
14718 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14719 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14720 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14721 break;
14722 default:
14723 fEquals = false;
14724 break;
14725 }
14726 if (!fEquals)
14727 {
14728 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14729 break;
14730 }
14731
14732 /* advance */
14733 pIemRec = pIemRec->pNext;
14734 pOtherRec = pOtherRec->pNext;
14735 }
14736
14737 /* Ignore extra writes and reads. */
14738 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14739 {
14740 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14741 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14742 pIemRec = pIemRec->pNext;
14743 }
14744 if (pIemRec != NULL)
14745 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14746 else if (pOtherRec != NULL)
14747 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14748 }
14749 IEM_GET_CTX(pVCpu) = pOrgCtx;
14750
14751 return rcStrictIem;
14752}
14753
14754#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14755
14756/* stubs */
14757IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14758{
14759 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14760 return VERR_INTERNAL_ERROR;
14761}
14762
14763IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14764{
14765 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14766 return VERR_INTERNAL_ERROR;
14767}
14768
14769#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14770
14771
14772#ifdef LOG_ENABLED
14773/**
14774 * Logs the current instruction.
14775 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14776 * @param pCtx The current CPU context.
14777 * @param fSameCtx Set if we have the same context information as the VMM,
14778 * clear if we may have already executed an instruction in
14779 * our debug context. When clear, we assume IEMCPU holds
14780 * valid CPU mode info.
14781 */
14782IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14783{
14784# ifdef IN_RING3
14785 if (LogIs2Enabled())
14786 {
14787 char szInstr[256];
14788 uint32_t cbInstr = 0;
14789 if (fSameCtx)
14790 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14791 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14792 szInstr, sizeof(szInstr), &cbInstr);
14793 else
14794 {
14795 uint32_t fFlags = 0;
14796 switch (pVCpu->iem.s.enmCpuMode)
14797 {
14798 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14799 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14800 case IEMMODE_16BIT:
14801 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14802 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14803 else
14804 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14805 break;
14806 }
14807 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14808 szInstr, sizeof(szInstr), &cbInstr);
14809 }
14810
14811 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14812 Log2(("****\n"
14813 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14814 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14815 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14816 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14817 " %s\n"
14818 ,
14819 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14820 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14821 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14822 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14823 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14824 szInstr));
14825
14826 if (LogIs3Enabled())
14827 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14828 }
14829 else
14830# endif
14831 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14832 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14833 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14834}
14835#endif
14836
14837
14838/**
14839 * Makes status code adjustments (pass-up from I/O and access handlers)
14840 * as well as maintaining statistics.
14841 *
14842 * @returns Strict VBox status code to pass up.
14843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14844 * @param rcStrict The status from executing an instruction.
14845 */
14846DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14847{
14848 if (rcStrict != VINF_SUCCESS)
14849 {
14850 if (RT_SUCCESS(rcStrict))
14851 {
14852 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14853 || rcStrict == VINF_IOM_R3_IOPORT_READ
14854 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14855 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14856 || rcStrict == VINF_IOM_R3_MMIO_READ
14857 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14858 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14859 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14860 || rcStrict == VINF_CPUM_R3_MSR_READ
14861 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14862 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14863 || rcStrict == VINF_EM_RAW_TO_R3
14864 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14865 || rcStrict == VINF_EM_TRIPLE_FAULT
14866 /* raw-mode / virt handlers only: */
14867 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14868 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14869 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14870 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14871 || rcStrict == VINF_SELM_SYNC_GDT
14872 || rcStrict == VINF_CSAM_PENDING_ACTION
14873 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14874 /* nested hw.virt codes: */
14875 || rcStrict == VINF_SVM_VMEXIT
14876 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14877/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
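            /* The saved pass-up status replaces rcStrict unless it is an EM scheduling status
               (within the VINF_EM range) that is numerically greater or equal, i.e. of equal or
               lower priority, than rcStrict. */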
14878 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
14879#ifdef VBOX_WITH_NESTED_HWVIRT
14880 if ( rcStrict == VINF_SVM_VMEXIT
14881 && rcPassUp == VINF_SUCCESS)
14882 rcStrict = VINF_SUCCESS;
14883 else
14884#endif
14885 if (rcPassUp == VINF_SUCCESS)
14886 pVCpu->iem.s.cRetInfStatuses++;
14887 else if ( rcPassUp < VINF_EM_FIRST
14888 || rcPassUp > VINF_EM_LAST
14889 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14890 {
14891 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14892 pVCpu->iem.s.cRetPassUpStatus++;
14893 rcStrict = rcPassUp;
14894 }
14895 else
14896 {
14897 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14898 pVCpu->iem.s.cRetInfStatuses++;
14899 }
14900 }
14901 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14902 pVCpu->iem.s.cRetAspectNotImplemented++;
14903 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14904 pVCpu->iem.s.cRetInstrNotImplemented++;
14905#ifdef IEM_VERIFICATION_MODE_FULL
14906 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14907 rcStrict = VINF_SUCCESS;
14908#endif
14909 else
14910 pVCpu->iem.s.cRetErrStatuses++;
14911 }
14912 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14913 {
14914 pVCpu->iem.s.cRetPassUpStatus++;
14915 rcStrict = pVCpu->iem.s.rcPassUp;
14916 }
14917
14918 return rcStrict;
14919}
14920
14921
14922/**
14923 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14924 * IEMExecOneWithPrefetchedByPC.
14925 *
14926 * Similar code is found in IEMExecLots.
14927 *
14928 * @return Strict VBox status code.
14929 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14931 * @param fExecuteInhibit If set, execute the instruction following CLI,
14932 * POP SS and MOV SS,GR.
14933 */
14934DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14935{
14936#ifdef IEM_WITH_SETJMP
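    /* With IEM_WITH_SETJMP the decoder and memory helpers report failures by longjmp'ing back
       here instead of returning status codes; the value passed to longjmp becomes rcStrict. */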
14937 VBOXSTRICTRC rcStrict;
14938 jmp_buf JmpBuf;
14939 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14940 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14941 if ((rcStrict = setjmp(JmpBuf)) == 0)
14942 {
14943 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14944 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14945 }
14946 else
14947 pVCpu->iem.s.cLongJumps++;
14948 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14949#else
14950 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14951 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14952#endif
14953 if (rcStrict == VINF_SUCCESS)
14954 pVCpu->iem.s.cInstructions++;
14955 if (pVCpu->iem.s.cActiveMappings > 0)
14956 {
14957 Assert(rcStrict != VINF_SUCCESS);
14958 iemMemRollback(pVCpu);
14959 }
14960//#ifdef DEBUG
14961// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14962//#endif
14963
14964 /* Execute the next instruction as well if a cli, pop ss or
14965 mov ss, Gr has just completed successfully. */
14966 if ( fExecuteInhibit
14967 && rcStrict == VINF_SUCCESS
14968 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14969 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
14970 {
14971 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14972 if (rcStrict == VINF_SUCCESS)
14973 {
14974#ifdef LOG_ENABLED
14975 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14976#endif
14977#ifdef IEM_WITH_SETJMP
14978 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14979 if ((rcStrict = setjmp(JmpBuf)) == 0)
14980 {
14981 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14982 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14983 }
14984 else
14985 pVCpu->iem.s.cLongJumps++;
14986 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14987#else
14988 IEM_OPCODE_GET_NEXT_U8(&b);
14989 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14990#endif
14991 if (rcStrict == VINF_SUCCESS)
14992 pVCpu->iem.s.cInstructions++;
14993 if (pVCpu->iem.s.cActiveMappings > 0)
14994 {
14995 Assert(rcStrict != VINF_SUCCESS);
14996 iemMemRollback(pVCpu);
14997 }
14998 }
14999 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
15000 }
15001
15002 /*
15003 * Return value fiddling, statistics and sanity assertions.
15004 */
15005 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15006
15007 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15008 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15009#if defined(IEM_VERIFICATION_MODE_FULL)
15010 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15011 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15012 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15013 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15014#endif
15015 return rcStrict;
15016}
15017
15018
15019#ifdef IN_RC
15020/**
15021 * Re-enters raw-mode or ensures we return to ring-3.
15022 *
15023 * @returns rcStrict, maybe modified.
15024 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15025 * @param pCtx The current CPU context.
15026 * @param rcStrict The status code returned by the interpreter.
15027 */
15028DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
15029{
15030 if ( !pVCpu->iem.s.fInPatchCode
15031 && ( rcStrict == VINF_SUCCESS
15032 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
15033 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
15034 {
15035 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
15036 CPUMRawEnter(pVCpu);
15037 else
15038 {
15039 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
15040 rcStrict = VINF_EM_RESCHEDULE;
15041 }
15042 }
15043 return rcStrict;
15044}
15045#endif
15046
15047
15048/**
15049 * Execute one instruction.
15050 *
15051 * @return Strict VBox status code.
15052 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15053 */
15054VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
15055{
15056#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15057 if (++pVCpu->iem.s.cVerifyDepth == 1)
15058 iemExecVerificationModeSetup(pVCpu);
15059#endif
15060#ifdef LOG_ENABLED
15061 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15062 iemLogCurInstr(pVCpu, pCtx, true);
15063#endif
15064
15065 /*
15066 * Do the decoding and emulation.
15067 */
15068 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15069 if (rcStrict == VINF_SUCCESS)
15070 rcStrict = iemExecOneInner(pVCpu, true);
15071
15072#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15073 /*
15074 * Assert some sanity.
15075 */
15076 if (pVCpu->iem.s.cVerifyDepth == 1)
15077 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15078 pVCpu->iem.s.cVerifyDepth--;
15079#endif
15080#ifdef IN_RC
15081 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15082#endif
15083 if (rcStrict != VINF_SUCCESS)
15084 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15085 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15086 return rcStrict;
15087}
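
/*
 * Usage sketch (illustrative only, not the real EM dispatch code): a caller
 * running on the EMT invokes IEMExecOne and hands any non-VINF_SUCCESS status,
 * informational or error, back to its own scheduling logic.  The loop bound
 * and variable names are assumptions made for this example.
 *
 *     for (unsigned cTries = 0; cTries < 16; cTries++)
 *     {
 *         VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *         if (rcStrict != VINF_SUCCESS)
 *             return rcStrict; // I/O-to-ring-3, scheduling or error status.
 *     }
 */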
15088
15089
15090VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15091{
15092 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15093 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15094
15095 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15096 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15097 if (rcStrict == VINF_SUCCESS)
15098 {
15099 rcStrict = iemExecOneInner(pVCpu, true);
15100 if (pcbWritten)
15101 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15102 }
15103
15104#ifdef IN_RC
15105 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15106#endif
15107 return rcStrict;
15108}
15109
15110
15111VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15112 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15113{
15114 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15115 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15116
15117 VBOXSTRICTRC rcStrict;
15118 if ( cbOpcodeBytes
15119 && pCtx->rip == OpcodeBytesPC)
15120 {
15121 iemInitDecoder(pVCpu, false);
15122#ifdef IEM_WITH_CODE_TLB
15123 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15124 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15125 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15126 pVCpu->iem.s.offCurInstrStart = 0;
15127 pVCpu->iem.s.offInstrNextByte = 0;
15128#else
15129 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15130 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15131#endif
15132 rcStrict = VINF_SUCCESS;
15133 }
15134 else
15135 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15136 if (rcStrict == VINF_SUCCESS)
15137 {
15138 rcStrict = iemExecOneInner(pVCpu, true);
15139 }
15140
15141#ifdef IN_RC
15142 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15143#endif
15144 return rcStrict;
15145}
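
/*
 * Usage sketch (assumption, not lifted from an actual caller): a component
 * that already holds the opcode bytes for the instruction at the current RIP
 * can pass them in and skip the guest-memory prefetch.  The abInstr buffer
 * and cbInstr length below are hypothetical locals.
 *
 *     VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
 *                                                          IEM_GET_CTX(pVCpu)->rip, abInstr, cbInstr);
 *     // If RIP no longer matches OpcodeBytesPC (or cbOpcodeBytes is zero),
 *     // the call falls back to a normal opcode prefetch.
 */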
15146
15147
15148VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15149{
15150 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15151 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15152
15153 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15154 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15155 if (rcStrict == VINF_SUCCESS)
15156 {
15157 rcStrict = iemExecOneInner(pVCpu, false);
15158 if (pcbWritten)
15159 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15160 }
15161
15162#ifdef IN_RC
15163 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15164#endif
15165 return rcStrict;
15166}
15167
15168
15169VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15170 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15171{
15172 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15173 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15174
15175 VBOXSTRICTRC rcStrict;
15176 if ( cbOpcodeBytes
15177 && pCtx->rip == OpcodeBytesPC)
15178 {
15179 iemInitDecoder(pVCpu, true);
15180#ifdef IEM_WITH_CODE_TLB
15181 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15182 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15183 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15184 pVCpu->iem.s.offCurInstrStart = 0;
15185 pVCpu->iem.s.offInstrNextByte = 0;
15186#else
15187 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15188 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15189#endif
15190 rcStrict = VINF_SUCCESS;
15191 }
15192 else
15193 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15194 if (rcStrict == VINF_SUCCESS)
15195 rcStrict = iemExecOneInner(pVCpu, false);
15196
15197#ifdef IN_RC
15198 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15199#endif
15200 return rcStrict;
15201}
15202
15203
15204/**
15205 * For debugging DISGetParamSize, may come in handy.
15206 *
15207 * @returns Strict VBox status code.
15208 * @param pVCpu The cross context virtual CPU structure of the
15209 * calling EMT.
15210 * @param pCtxCore The context core structure.
15211 * @param OpcodeBytesPC The PC of the opcode bytes.
15212 * @param pvOpcodeBytes Prefetched opcode bytes.
15213 * @param cbOpcodeBytes Number of prefetched bytes.
15214 * @param pcbWritten Where to return the number of bytes written.
15215 * Optional.
15216 */
15217VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15218 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
15219 uint32_t *pcbWritten)
15220{
15221 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15222 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15223
15224 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15225 VBOXSTRICTRC rcStrict;
15226 if ( cbOpcodeBytes
15227 && pCtx->rip == OpcodeBytesPC)
15228 {
15229 iemInitDecoder(pVCpu, true);
15230#ifdef IEM_WITH_CODE_TLB
15231 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15232 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15233 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15234 pVCpu->iem.s.offCurInstrStart = 0;
15235 pVCpu->iem.s.offInstrNextByte = 0;
15236#else
15237 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15238 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15239#endif
15240 rcStrict = VINF_SUCCESS;
15241 }
15242 else
15243 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15244 if (rcStrict == VINF_SUCCESS)
15245 {
15246 rcStrict = iemExecOneInner(pVCpu, false);
15247 if (pcbWritten)
15248 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15249 }
15250
15251#ifdef IN_RC
15252 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15253#endif
15254 return rcStrict;
15255}
15256
15257
15258VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
15259{
15260 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
15261
15262#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15263 /*
15264 * See if there is an interrupt pending in TRPM, inject it if we can.
15265 */
15266 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15267# ifdef IEM_VERIFICATION_MODE_FULL
15268 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15269# endif
15270
15271 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
15272# if defined(VBOX_WITH_NESTED_HWVIRT)
15273 bool fIntrEnabled = pCtx->hwvirt.fGif;
15274 if (fIntrEnabled)
15275 {
15276 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15277 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15278 else
15279 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15280 }
15281# else
15282 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15283# endif
15284 if ( fIntrEnabled
15285 && TRPMHasTrap(pVCpu)
15286 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15287 {
15288 uint8_t u8TrapNo;
15289 TRPMEVENT enmType;
15290 RTGCUINT uErrCode;
15291 RTGCPTR uCr2;
15292 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15293 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15294 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15295 TRPMResetTrap(pVCpu);
15296 }
15297
15298 /*
15299 * Log the state.
15300 */
15301# ifdef LOG_ENABLED
15302 iemLogCurInstr(pVCpu, pCtx, true);
15303# endif
15304
15305 /*
15306 * Do the decoding and emulation.
15307 */
15308 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15309 if (rcStrict == VINF_SUCCESS)
15310 rcStrict = iemExecOneInner(pVCpu, true);
15311
15312 /*
15313 * Assert some sanity.
15314 */
15315 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15316
15317 /*
15318 * Log and return.
15319 */
15320 if (rcStrict != VINF_SUCCESS)
15321 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15322 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15323 if (pcInstructions)
15324 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15325 return rcStrict;
15326
15327#else /* Not verification mode */
15328
15329 /*
15330 * See if there is an interrupt pending in TRPM, inject it if we can.
15331 */
15332 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15333# ifdef IEM_VERIFICATION_MODE_FULL
15334 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15335# endif
15336
15337 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
15338# if defined(VBOX_WITH_NESTED_HWVIRT)
15339 bool fIntrEnabled = pCtx->hwvirt.fGif;
15340 if (fIntrEnabled)
15341 {
15342 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15343 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15344 else
15345 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15346 }
15347# else
15348 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15349# endif
15350 if ( fIntrEnabled
15351 && TRPMHasTrap(pVCpu)
15352 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15353 {
15354 uint8_t u8TrapNo;
15355 TRPMEVENT enmType;
15356 RTGCUINT uErrCode;
15357 RTGCPTR uCr2;
15358 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15359 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15360 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15361 TRPMResetTrap(pVCpu);
15362 }
15363
15364 /*
15365 * Initial decoder init w/ prefetch, then setup setjmp.
15366 */
15367 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15368 if (rcStrict == VINF_SUCCESS)
15369 {
15370# ifdef IEM_WITH_SETJMP
15371 jmp_buf JmpBuf;
15372 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
15373 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15374 pVCpu->iem.s.cActiveMappings = 0;
15375 if ((rcStrict = setjmp(JmpBuf)) == 0)
15376# endif
15377 {
15378 /*
15379 * The run loop. We limit ourselves to 4096 instructions right now.
15380 */
15381 PVM pVM = pVCpu->CTX_SUFF(pVM);
15382 uint32_t cInstr = 4096;
15383 for (;;)
15384 {
15385 /*
15386 * Log the state.
15387 */
15388# ifdef LOG_ENABLED
15389 iemLogCurInstr(pVCpu, pCtx, true);
15390# endif
15391
15392 /*
15393 * Do the decoding and emulation.
15394 */
15395 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15396 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15397 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15398 {
15399 Assert(pVCpu->iem.s.cActiveMappings == 0);
15400 pVCpu->iem.s.cInstructions++;
15401 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
15402 {
15403 uint32_t fCpu = pVCpu->fLocalForcedActions
15404 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
15405 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
15406 | VMCPU_FF_TLB_FLUSH
15407# ifdef VBOX_WITH_RAW_MODE
15408 | VMCPU_FF_TRPM_SYNC_IDT
15409 | VMCPU_FF_SELM_SYNC_TSS
15410 | VMCPU_FF_SELM_SYNC_GDT
15411 | VMCPU_FF_SELM_SYNC_LDT
15412# endif
15413 | VMCPU_FF_INHIBIT_INTERRUPTS
15414 | VMCPU_FF_BLOCK_NMIS
15415 | VMCPU_FF_UNHALT ));
15416
15417 if (RT_LIKELY( ( !fCpu
15418 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
15419 && !pCtx->rflags.Bits.u1IF) )
15420 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
15421 {
15422 if (cInstr-- > 0)
15423 {
15424 Assert(pVCpu->iem.s.cActiveMappings == 0);
15425 iemReInitDecoder(pVCpu);
15426 continue;
15427 }
15428 }
15429 }
15430 Assert(pVCpu->iem.s.cActiveMappings == 0);
15431 }
15432 else if (pVCpu->iem.s.cActiveMappings > 0)
15433 iemMemRollback(pVCpu);
15434 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15435 break;
15436 }
15437 }
15438# ifdef IEM_WITH_SETJMP
15439 else
15440 {
15441 if (pVCpu->iem.s.cActiveMappings > 0)
15442 iemMemRollback(pVCpu);
15443 pVCpu->iem.s.cLongJumps++;
15444 }
15445 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15446# endif
15447
15448 /*
15449 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15450 */
15451 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15452 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15453# if defined(IEM_VERIFICATION_MODE_FULL)
15454 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15455 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15456 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15457 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15458# endif
15459 }
15460# ifdef VBOX_WITH_NESTED_HWVIRT
15461 else
15462 {
15463 /*
15464 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
15465 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
15466 */
15467 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15468 }
15469# endif
15470
15471 /*
15472 * Maybe re-enter raw-mode and log.
15473 */
15474# ifdef IN_RC
15475 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15476# endif
15477 if (rcStrict != VINF_SUCCESS)
15478 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15479 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15480 if (pcInstructions)
15481 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15482 return rcStrict;
15483#endif /* Not verification mode */
15484}
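
/*
 * Usage sketch (illustrative only): the caller gets back both the strict
 * status and, optionally, the number of instructions executed in this burst.
 *
 *     uint32_t cInstructions = 0;
 *     VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
 *     Log(("Executed %u instructions, rcStrict=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
 */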
15485
15486
15487
15488/**
15489 * Injects a trap, fault, abort, software interrupt or external interrupt.
15490 *
15491 * The parameter list matches TRPMQueryTrapAll pretty closely.
15492 *
15493 * @returns Strict VBox status code.
15494 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15495 * @param u8TrapNo The trap number.
15496 * @param enmType What type is it (trap/fault/abort), software
15497 * interrupt or hardware interrupt.
15498 * @param uErrCode The error code if applicable.
15499 * @param uCr2 The CR2 value if applicable.
15500 * @param cbInstr The instruction length (only relevant for
15501 * software interrupts).
15502 */
15503VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
15504 uint8_t cbInstr)
15505{
15506 iemInitDecoder(pVCpu, false);
15507#ifdef DBGFTRACE_ENABLED
15508 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
15509 u8TrapNo, enmType, uErrCode, uCr2);
15510#endif
15511
15512 uint32_t fFlags;
15513 switch (enmType)
15514 {
15515 case TRPM_HARDWARE_INT:
15516 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
15517 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
15518 uErrCode = uCr2 = 0;
15519 break;
15520
15521 case TRPM_SOFTWARE_INT:
15522 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
15523 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
15524 uErrCode = uCr2 = 0;
15525 break;
15526
15527 case TRPM_TRAP:
15528 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
15529 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
15530 if (u8TrapNo == X86_XCPT_PF)
15531 fFlags |= IEM_XCPT_FLAGS_CR2;
15532 switch (u8TrapNo)
15533 {
15534 case X86_XCPT_DF:
15535 case X86_XCPT_TS:
15536 case X86_XCPT_NP:
15537 case X86_XCPT_SS:
15538 case X86_XCPT_PF:
15539 case X86_XCPT_AC:
15540 fFlags |= IEM_XCPT_FLAGS_ERR;
15541 break;
15542
15543 case X86_XCPT_NMI:
15544 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
15545 break;
15546 }
15547 break;
15548
15549 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15550 }
15551
15552 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15553}
15554
15555
15556/**
15557 * Injects the active TRPM event.
15558 *
15559 * @returns Strict VBox status code.
15560 * @param pVCpu The cross context virtual CPU structure.
15561 */
15562VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
15563{
15564#ifndef IEM_IMPLEMENTS_TASKSWITCH
15565 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15566#else
15567 uint8_t u8TrapNo;
15568 TRPMEVENT enmType;
15569 RTGCUINT uErrCode;
15570 RTGCUINTPTR uCr2;
15571 uint8_t cbInstr;
15572 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
15573 if (RT_FAILURE(rc))
15574 return rc;
15575
15576 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15577
15578 /** @todo Are there any other codes that imply the event was successfully
15579 * delivered to the guest? See @bugref{6607}. */
15580 if ( rcStrict == VINF_SUCCESS
15581 || rcStrict == VINF_IEM_RAISED_XCPT)
15582 {
15583 TRPMResetTrap(pVCpu);
15584 }
15585 return rcStrict;
15586#endif
15587}
15588
15589
15590VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15591{
15592 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15593 return VERR_NOT_IMPLEMENTED;
15594}
15595
15596
15597VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15598{
15599 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15600 return VERR_NOT_IMPLEMENTED;
15601}
15602
15603
15604#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15605/**
15606 * Executes an IRET instruction with default operand size.
15607 *
15608 * This is for PATM.
15609 *
15610 * @returns VBox status code.
15611 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15612 * @param pCtxCore The register frame.
15613 */
15614VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
15615{
15616 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15617
15618 iemCtxCoreToCtx(pCtx, pCtxCore);
15619 iemInitDecoder(pVCpu);
15620 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15621 if (rcStrict == VINF_SUCCESS)
15622 iemCtxToCtxCore(pCtxCore, pCtx);
15623 else
15624 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15625 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15626 return rcStrict;
15627}
15628#endif
15629
15630
15631/**
15632 * Macro used by the IEMExec* methods to check the given instruction length.
15633 *
15634 * Will return on failure!
15635 *
15636 * @param a_cbInstr The given instruction length.
15637 * @param a_cbMin The minimum length.
15638 */
15639#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15640 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15641 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
15642
15643
15644/**
15645 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15646 *
15647 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15648 *
15649 * @returns Fiddled strict VBox status code, ready to return to non-IEM caller.
15650 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15651 * @param rcStrict The status code to fiddle.
15652 */
15653DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15654{
15655 iemUninitExec(pVCpu);
15656#ifdef IN_RC
15657 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15658 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15659#else
15660 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15661#endif
15662}
15663
15664
15665/**
15666 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15667 *
15668 * This API ASSUMES that the caller has already verified that the guest code is
15669 * allowed to access the I/O port. (The I/O port is in the DX register in the
15670 * guest state.)
15671 *
15672 * @returns Strict VBox status code.
15673 * @param pVCpu The cross context virtual CPU structure.
15674 * @param cbValue The size of the I/O port access (1, 2, or 4).
15675 * @param enmAddrMode The addressing mode.
15676 * @param fRepPrefix Indicates whether a repeat prefix is used
15677 * (doesn't matter which for this instruction).
15678 * @param cbInstr The instruction length in bytes.
15679 * @param iEffSeg The effective segment register number.
15680 * @param fIoChecked Whether the access to the I/O port has been
15681 * checked or not. It's typically checked in the
15682 * HM scenario.
15683 */
15684VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15685 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15686{
15687 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15688 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15689
15690 /*
15691 * State init.
15692 */
15693 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15694
15695 /*
15696 * Switch orgy for getting to the right handler.
15697 */
15698 VBOXSTRICTRC rcStrict;
15699 if (fRepPrefix)
15700 {
15701 switch (enmAddrMode)
15702 {
15703 case IEMMODE_16BIT:
15704 switch (cbValue)
15705 {
15706 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15707 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15708 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15709 default:
15710 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15711 }
15712 break;
15713
15714 case IEMMODE_32BIT:
15715 switch (cbValue)
15716 {
15717 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15718 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15719 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15720 default:
15721 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15722 }
15723 break;
15724
15725 case IEMMODE_64BIT:
15726 switch (cbValue)
15727 {
15728 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15729 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15730 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15731 default:
15732 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15733 }
15734 break;
15735
15736 default:
15737 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15738 }
15739 }
15740 else
15741 {
15742 switch (enmAddrMode)
15743 {
15744 case IEMMODE_16BIT:
15745 switch (cbValue)
15746 {
15747 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15748 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15749 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15750 default:
15751 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15752 }
15753 break;
15754
15755 case IEMMODE_32BIT:
15756 switch (cbValue)
15757 {
15758 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15759 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15760 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15761 default:
15762 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15763 }
15764 break;
15765
15766 case IEMMODE_64BIT:
15767 switch (cbValue)
15768 {
15769 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15770 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15771 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15772 default:
15773 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15774 }
15775 break;
15776
15777 default:
15778 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15779 }
15780 }
15781
15782 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15783}
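
/*
 * Usage sketch (assumption, not taken from HM): forwarding a REP OUTSB exit
 * that used a 16-bit address size and the default DS segment; cbInstr is
 * whatever instruction length the exit information supplied.
 *
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_16BIT,
 *                                                  true /*fRepPrefix*/, cbInstr,
 *                                                  X86_SREG_DS, true /*fIoChecked*/);
 */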
15784
15785
15786/**
15787 * Interface for HM and EM for executing string I/O IN (read) instructions.
15788 *
15789 * This API ASSUMES that the caller has already verified that the guest code is
15790 * allowed to access the I/O port. (The I/O port is in the DX register in the
15791 * guest state.)
15792 *
15793 * @returns Strict VBox status code.
15794 * @param pVCpu The cross context virtual CPU structure.
15795 * @param cbValue The size of the I/O port access (1, 2, or 4).
15796 * @param enmAddrMode The addressing mode.
15797 * @param fRepPrefix Indicates whether a repeat prefix is used
15798 * (doesn't matter which for this instruction).
15799 * @param cbInstr The instruction length in bytes.
15800 * @param fIoChecked Whether the access to the I/O port has been
15801 * checked or not. It's typically checked in the
15802 * HM scenario.
15803 */
15804VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15805 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15806{
15807 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15808
15809 /*
15810 * State init.
15811 */
15812 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15813
15814 /*
15815 * Switch orgy for getting to the right handler.
15816 */
15817 VBOXSTRICTRC rcStrict;
15818 if (fRepPrefix)
15819 {
15820 switch (enmAddrMode)
15821 {
15822 case IEMMODE_16BIT:
15823 switch (cbValue)
15824 {
15825 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15826 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15827 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15828 default:
15829 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15830 }
15831 break;
15832
15833 case IEMMODE_32BIT:
15834 switch (cbValue)
15835 {
15836 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15837 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15838 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15839 default:
15840 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15841 }
15842 break;
15843
15844 case IEMMODE_64BIT:
15845 switch (cbValue)
15846 {
15847 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15848 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15849 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15850 default:
15851 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15852 }
15853 break;
15854
15855 default:
15856 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15857 }
15858 }
15859 else
15860 {
15861 switch (enmAddrMode)
15862 {
15863 case IEMMODE_16BIT:
15864 switch (cbValue)
15865 {
15866 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15867 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15868 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15869 default:
15870 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15871 }
15872 break;
15873
15874 case IEMMODE_32BIT:
15875 switch (cbValue)
15876 {
15877 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15878 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15879 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15880 default:
15881 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15882 }
15883 break;
15884
15885 case IEMMODE_64BIT:
15886 switch (cbValue)
15887 {
15888 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15889 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15890 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15891 default:
15892 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15893 }
15894 break;
15895
15896 default:
15897 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15898 }
15899 }
15900
15901 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15902}
15903
15904
15905/**
15906 * Interface for rawmode to execute an OUT instruction.
15907 *
15908 * @returns Strict VBox status code.
15909 * @param pVCpu The cross context virtual CPU structure.
15910 * @param cbInstr The instruction length in bytes.
15911 * @param u16Port The port to write to.
15912 * @param cbReg The register size.
15913 *
15914 * @remarks In ring-0 not all of the state needs to be synced in.
15915 */
15916VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15917{
15918 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15919 Assert(cbReg <= 4 && cbReg != 3);
15920
15921 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15922 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15923 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15924}
15925
15926
15927/**
15928 * Interface for rawmode to execute an IN instruction.
15929 *
15930 * @returns Strict VBox status code.
15931 * @param pVCpu The cross context virtual CPU structure.
15932 * @param cbInstr The instruction length in bytes.
15933 * @param u16Port The port to read.
15934 * @param cbReg The register size.
15935 */
15936VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15937{
15938 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15939 Assert(cbReg <= 4 && cbReg != 3);
15940
15941 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15942 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15943 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15944}
15945
15946
15947/**
15948 * Interface for HM and EM to write to a CRx register.
15949 *
15950 * @returns Strict VBox status code.
15951 * @param pVCpu The cross context virtual CPU structure.
15952 * @param cbInstr The instruction length in bytes.
15953 * @param iCrReg The control register number (destination).
15954 * @param iGReg The general purpose register number (source).
15955 *
15956 * @remarks In ring-0 not all of the state needs to be synced in.
15957 */
15958VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15959{
15960 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15961 Assert(iCrReg < 16);
15962 Assert(iGReg < 16);
15963
15964 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15965 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15966 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15967}
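
/*
 * Usage sketch (assumption): emulating a "mov cr3, rax" after a CRx-write
 * intercept.  The cbInstr value comes from the exit information and
 * X86_GREG_xAX is used here purely as an example source register.
 *
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3 /*iCrReg=CR3*/,
 *                                                       X86_GREG_xAX /*iGReg*/);
 */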
15968
15969
15970/**
15971 * Interface for HM and EM to read from a CRx register.
15972 *
15973 * @returns Strict VBox status code.
15974 * @param pVCpu The cross context virtual CPU structure.
15975 * @param cbInstr The instruction length in bytes.
15976 * @param iGReg The general purpose register number (destination).
15977 * @param iCrReg The control register number (source).
15978 *
15979 * @remarks In ring-0 not all of the state needs to be synced in.
15980 */
15981VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15982{
15983 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15984 Assert(iCrReg < 16);
15985 Assert(iGReg < 16);
15986
15987 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15988 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15989 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15990}
15991
15992
15993/**
15994 * Interface for HM and EM to clear the CR0[TS] bit.
15995 *
15996 * @returns Strict VBox status code.
15997 * @param pVCpu The cross context virtual CPU structure.
15998 * @param cbInstr The instruction length in bytes.
15999 *
16000 * @remarks In ring-0 not all of the state needs to be synced in.
16001 */
16002VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
16003{
16004 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
16005
16006 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16007 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
16008 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16009}
16010
16011
16012/**
16013 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
16014 *
16015 * @returns Strict VBox status code.
16016 * @param pVCpu The cross context virtual CPU structure.
16017 * @param cbInstr The instruction length in bytes.
16018 * @param uValue The value to load into CR0.
16019 *
16020 * @remarks In ring-0 not all of the state needs to be synced in.
16021 */
16022VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
16023{
16024 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16025
16026 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16027 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
16028 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16029}
16030
16031
16032/**
16033 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
16034 *
16035 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
16036 *
16037 * @returns Strict VBox status code.
16038 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16039 * @param cbInstr The instruction length in bytes.
16040 * @remarks In ring-0 not all of the state needs to be synced in.
16041 * @thread EMT(pVCpu)
16042 */
16043VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
16044{
16045 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16046
16047 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16048 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
16049 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16050}
16051
16052
16053/**
16054 * Interface for HM and EM to emulate the INVLPG instruction.
16055 *
16056 * @param pVCpu The cross context virtual CPU structure.
16057 * @param cbInstr The instruction length in bytes.
16058 * @param GCPtrPage The effective address of the page to invalidate.
16059 *
16060 * @remarks In ring-0 not all of the state needs to be synced in.
16061 */
16062VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
16063{
16064 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16065
16066 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16067 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
16068 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16069}
16070
16071
16072/**
16073 * Interface for HM and EM to emulate the INVPCID instruction.
16074 *
16075 * @param pVCpu The cross context virtual CPU structure.
16076 * @param cbInstr The instruction length in bytes.
16077 * @param uType The invalidation type.
16078 * @param GCPtrInvpcidDesc The effective address of the INVPCID descriptor.
16079 *
16080 * @remarks In ring-0 not all of the state needs to be synced in.
16081 */
16082VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t uType, RTGCPTR GCPtrInvpcidDesc)
16083{
16084 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
16085
16086 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16087 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_invpcid, uType, GCPtrInvpcidDesc);
16088 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16089}
16090
16091
16092/**
16093 * Checks if IEM is in the process of delivering an event (interrupt or
16094 * exception).
16095 *
16096 * @returns true if we're in the process of raising an interrupt or exception,
16097 * false otherwise.
16098 * @param pVCpu The cross context virtual CPU structure.
16099 * @param puVector Where to store the vector associated with the
16100 * currently delivered event, optional.
16101 * @param pfFlags Where to store the event delivery flags (see
16102 * IEM_XCPT_FLAGS_XXX), optional.
16103 * @param puErr Where to store the error code associated with the
16104 * event, optional.
16105 * @param puCr2 Where to store the CR2 associated with the event,
16106 * optional.
16107 * @remarks The caller should check the flags to determine if the error code and
16108 * CR2 are valid for the event.
16109 */
16110VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
16111{
16112 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
16113 if (fRaisingXcpt)
16114 {
16115 if (puVector)
16116 *puVector = pVCpu->iem.s.uCurXcpt;
16117 if (pfFlags)
16118 *pfFlags = pVCpu->iem.s.fCurXcpt;
16119 if (puErr)
16120 *puErr = pVCpu->iem.s.uCurXcptErr;
16121 if (puCr2)
16122 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
16123 }
16124 return fRaisingXcpt;
16125}
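
/*
 * Usage sketch (illustrative only): query the event IEM is currently
 * delivering and consult the flags before trusting the error code and CR2.
 *
 *     uint8_t uVector; uint32_t fFlags; uint32_t uErr; uint64_t uCr2;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *     {
 *         bool const fErrValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
 *         bool const fCr2Valid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2);
 *         Log(("Delivering vector %#x (err code %s, CR2 %s)\n", uVector,
 *              fErrValid ? "valid" : "n/a", fCr2Valid ? "valid" : "n/a"));
 *     }
 */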
16126
16127#ifdef VBOX_WITH_NESTED_HWVIRT
16128/**
16129 * Interface for HM and EM to emulate the CLGI instruction.
16130 *
16131 * @returns Strict VBox status code.
16132 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16133 * @param cbInstr The instruction length in bytes.
16134 * @thread EMT(pVCpu)
16135 */
16136VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
16137{
16138 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16139
16140 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16141 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
16142 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16143}
16144
16145
16146/**
16147 * Interface for HM and EM to emulate the STGI instruction.
16148 *
16149 * @returns Strict VBox status code.
16150 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16151 * @param cbInstr The instruction length in bytes.
16152 * @thread EMT(pVCpu)
16153 */
16154VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
16155{
16156 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16157
16158 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16159 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
16160 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16161}
16162
16163
16164/**
16165 * Interface for HM and EM to emulate the VMLOAD instruction.
16166 *
16167 * @returns Strict VBox status code.
16168 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16169 * @param cbInstr The instruction length in bytes.
16170 * @thread EMT(pVCpu)
16171 */
16172VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
16173{
16174 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16175
16176 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16177 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
16178 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16179}
16180
16181
16182/**
16183 * Interface for HM and EM to emulate the VMSAVE instruction.
16184 *
16185 * @returns Strict VBox status code.
16186 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16187 * @param cbInstr The instruction length in bytes.
16188 * @thread EMT(pVCpu)
16189 */
16190VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
16191{
16192 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16193
16194 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16195 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
16196 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16197}
16198
16199
16200/**
16201 * Interface for HM and EM to emulate the INVLPGA instruction.
16202 *
16203 * @returns Strict VBox status code.
16204 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16205 * @param cbInstr The instruction length in bytes.
16206 * @thread EMT(pVCpu)
16207 */
16208VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
16209{
16210 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16211
16212 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16213 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
16214 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16215}
16216
16217
16218/**
16219 * Interface for HM and EM to emulate the VMRUN instruction.
16220 *
16221 * @returns Strict VBox status code.
16222 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16223 * @param cbInstr The instruction length in bytes.
16224 * @thread EMT(pVCpu)
16225 */
16226VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
16227{
16228 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16229
16230 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16231 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
16232 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16233}
16234
16235
16236/**
16237 * Interface for HM and EM to emulate \#VMEXIT.
16238 *
16239 * @returns Strict VBox status code.
16240 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16241 * @param uExitCode The exit code.
16242 * @param uExitInfo1 The exit info. 1 field.
16243 * @param uExitInfo2 The exit info. 2 field.
16244 * @thread EMT(pVCpu)
16245 */
16246VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
16247{
16248 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, IEM_GET_CTX(pVCpu), uExitCode, uExitInfo1, uExitInfo2);
16249 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16250}
16251#endif /* VBOX_WITH_NESTED_HWVIRT */
16252
16253#ifdef IN_RING3
16254
16255/**
16256 * Handles the unlikely and probably fatal merge cases.
16257 *
16258 * @returns Merged status code.
16259 * @param rcStrict Current EM status code.
16260 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16261 * with @a rcStrict.
16262 * @param iMemMap The memory mapping index. For error reporting only.
16263 * @param pVCpu The cross context virtual CPU structure of the calling
16264 * thread, for error reporting only.
16265 */
16266DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16267 unsigned iMemMap, PVMCPU pVCpu)
16268{
16269 if (RT_FAILURE_NP(rcStrict))
16270 return rcStrict;
16271
16272 if (RT_FAILURE_NP(rcStrictCommit))
16273 return rcStrictCommit;
16274
16275 if (rcStrict == rcStrictCommit)
16276 return rcStrictCommit;
16277
16278 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16279 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16280 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16281 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16282 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16283 return VERR_IOM_FF_STATUS_IPE;
16284}
16285
16286
16287/**
16288 * Helper for IOMR3ProcessForceFlag.
16289 *
16290 * @returns Merged status code.
16291 * @param rcStrict Current EM status code.
16292 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16293 * with @a rcStrict.
16294 * @param iMemMap The memory mapping index. For error reporting only.
16295 * @param pVCpu The cross context virtual CPU structure of the calling
16296 * thread, for error reporting only.
16297 */
16298DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16299{
16300 /* Simple. */
16301 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16302 return rcStrictCommit;
16303
16304 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16305 return rcStrict;
16306
16307 /* EM scheduling status codes. */
16308 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16309 && rcStrict <= VINF_EM_LAST))
16310 {
16311 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16312 && rcStrictCommit <= VINF_EM_LAST))
16313 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16314 }
16315
16316 /* Unlikely */
16317 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16318}
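
/*
 * Summary of the merge outcomes above (derived from the checks themselves):
 *   - rcStrict is VINF_SUCCESS or VINF_EM_RAW_TO_R3: the commit status wins.
 *   - rcStrictCommit is VINF_SUCCESS: the existing EM status wins.
 *   - both are in the VINF_EM_FIRST..VINF_EM_LAST range: the numerically
 *     lower (higher priority) status wins.
 *   - anything else is handed to iemR3MergeStatusSlow.
 */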
16319
16320
16321/**
16322 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16323 *
16324 * @returns Merge between @a rcStrict and what the commit operation returned.
16325 * @param pVM The cross context VM structure.
16326 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16327 * @param rcStrict The status code returned by ring-0 or raw-mode.
16328 */
16329VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16330{
16331 /*
16332 * Reset the pending commit.
16333 */
16334 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16335 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16336 ("%#x %#x %#x\n",
16337 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16338 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16339
16340 /*
16341 * Commit the pending bounce buffers (usually just one).
16342 */
16343 unsigned cBufs = 0;
16344 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16345 while (iMemMap-- > 0)
16346 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16347 {
16348 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16349 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16350 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16351
16352 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16353 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16354 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16355
16356 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16357 {
16358 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16359 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16360 pbBuf,
16361 cbFirst,
16362 PGMACCESSORIGIN_IEM);
16363 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16364 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16365 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16366 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16367 }
16368
16369 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16370 {
16371 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16372 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16373 pbBuf + cbFirst,
16374 cbSecond,
16375 PGMACCESSORIGIN_IEM);
16376 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16377 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16378 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16379 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16380 }
16381 cBufs++;
16382 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16383 }
16384
16385 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16386 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16387 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16388 pVCpu->iem.s.cActiveMappings = 0;
16389 return rcStrict;
16390}
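
/*
 * Usage sketch (illustrative only, the real caller lives in EM): once back in
 * ring-3 with VMCPU_FF_IEM set, commit the pending bounce-buffer writes and
 * merge the commit status into the status being processed.
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */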
16391
16392#endif /* IN_RING3 */
16393