VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 70310

Last change on this file since 70310 was 70255, checked in by vboxsync, 7 years ago

VMM/IEM: Match AMD spec exactly whenever possible while naming SVM specific feature, named "decode assists" (plural).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 635.2 KB
1/* $Id: IEMAll.cpp 70255 2017-12-21 05:54:37Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
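
/*
 * Example (sketch only, not part of the original sources): roughly what
 * statements at the levels listed above look like from IEM code. The values
 * and variable names (uCs, uRip, GCPtrMem, cbMem) are made up for
 * illustration.
 */
#if 0 /* illustration only */
    Log(("IEM: raising #GP(0) at %04x:%RX64\n", uCs, uRip));   /* level 1: exceptions and other major events */
    LogFlow(("IEMExecOne: enter\n"));                          /* flow:    basic enter/exit info */
    Log4(("decode - mov Gv,Ev\n"));                            /* level 4: decoded mnemonic */
    Log8(("IEM WR %RGv LB %#x\n", GCPtrMem, cbMem));           /* level 8: memory writes */
#endif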
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/em.h>
106# include <VBox/vmm/hm_svm.h>
107#endif
108#include <VBox/vmm/tm.h>
109#include <VBox/vmm/dbgf.h>
110#include <VBox/vmm/dbgftrace.h>
111#ifdef VBOX_WITH_RAW_MODE_NOT_R0
112# include <VBox/vmm/patm.h>
113# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
114# include <VBox/vmm/csam.h>
115# endif
116#endif
117#include "IEMInternal.h"
118#ifdef IEM_VERIFICATION_MODE_FULL
119# include <VBox/vmm/rem.h>
120# include <VBox/vmm/mm.h>
121#endif
122#include <VBox/vmm/vm.h>
123#include <VBox/log.h>
124#include <VBox/err.h>
125#include <VBox/param.h>
126#include <VBox/dis.h>
127#include <VBox/disopcode.h>
128#include <iprt/assert.h>
129#include <iprt/string.h>
130#include <iprt/x86.h>
131
132
133/*********************************************************************************************************************************
134* Structures and Typedefs *
135*********************************************************************************************************************************/
136/** @typedef PFNIEMOP
137 * Pointer to an opcode decoder function.
138 */
139
140/** @def FNIEMOP_DEF
141 * Define an opcode decoder function.
142 *
143 * We're using macros for this so that adding and removing parameters as well as
144 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL
145 *
146 * @param a_Name The function name.
147 */
148
149/** @typedef PFNIEMOPRM
150 * Pointer to an opcode decoder function with RM byte.
151 */
152
153/** @def FNIEMOPRM_DEF
154 * Define an opcode decoder function with RM byte.
155 *
156 * We're using macros for this so that adding and removing parameters as well as
157 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1
158 *
159 * @param a_Name The function name.
160 */
161
162#if defined(__GNUC__) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
164typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
171
172#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
174typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
181
182#elif defined(__GNUC__)
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
191
192#else
193typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
194typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
195# define FNIEMOP_DEF(a_Name) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
197# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
198 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
199# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
200 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
201
202#endif
203#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
204
205
206/**
207 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
208 */
209typedef union IEMSELDESC
210{
211 /** The legacy view. */
212 X86DESC Legacy;
213 /** The long mode view. */
214 X86DESC64 Long;
215} IEMSELDESC;
216/** Pointer to a selector descriptor table entry. */
217typedef IEMSELDESC *PIEMSELDESC;
218
219/**
220 * CPU exception classes.
221 */
222typedef enum IEMXCPTCLASS
223{
224 IEMXCPTCLASS_BENIGN,
225 IEMXCPTCLASS_CONTRIBUTORY,
226 IEMXCPTCLASS_PAGE_FAULT,
227 IEMXCPTCLASS_DOUBLE_FAULT
228} IEMXCPTCLASS;
229
230
231/*********************************************************************************************************************************
232* Defined Constants And Macros *
233*********************************************************************************************************************************/
234/** @def IEM_WITH_SETJMP
235 * Enables alternative status code handling using setjmps.
236 *
237 * This adds a bit of expense via the setjmp() call since it saves all the
238 * non-volatile registers. However, it eliminates return code checks and allows
239 * for more optimal return value passing (return regs instead of stack buffer).
240 */
241#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
242# define IEM_WITH_SETJMP
243#endif
244
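/*
 * Rough sketch (not from the original sources) of what the setjmp mode buys
 * us in a caller of the memory fetch helpers. Without IEM_WITH_SETJMP every
 * fetch returns a VBOXSTRICTRC that must be checked; with it, the raise
 * functions (e.g. iemRaisePageFaultJmp below) longjmp straight back to the
 * dispatcher, so the *Jmp fetch variant shown here (name illustrative)
 * returns the value directly and needs no status check.
 */
#if 0 /* illustration only */
    /* Status code style: */
    uint32_t u32Value;
    VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, pVCpu->iem.s.iEffSeg, GCPtrMem);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Setjmp style: */
    uint32_t const u32Value2 = iemMemFetchDataU32Jmp(pVCpu, pVCpu->iem.s.iEffSeg, GCPtrMem);
#endif
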
245/** Temporary hack to disable the double execution. Will be removed in favor
246 * of a dedicated execution mode in EM. */
247//#define IEM_VERIFICATION_MODE_NO_REM
248
249/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
250 * due to GCC lacking knowledge about the value range of a switch. */
251#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
252
253/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
254#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
255
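/*
 * Usage sketch (added for illustration): the macro supplies the 'default:'
 * label itself, so it is dropped into a switch statement as-is inside a
 * function returning a VBOXSTRICTRC/int status.
 */
#if 0 /* illustration only */
    switch (pVCpu->iem.s.enmEffOpSize)
    {
        case IEMMODE_16BIT: /* ... */ break;
        case IEMMODE_32BIT: /* ... */ break;
        case IEMMODE_64BIT: /* ... */ break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
#endif
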
256/**
257 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
258 * occasion.
259 */
260#ifdef LOG_ENABLED
261# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
262 do { \
263 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
264 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
265 } while (0)
266#else
267# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
269#endif
270
271/**
272 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
273 * occasion using the supplied logger statement.
274 *
275 * @param a_LoggerArgs What to log on failure.
276 */
277#ifdef LOG_ENABLED
278# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
279 do { \
280 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
281 /*LogFunc(a_LoggerArgs);*/ \
282 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
283 } while (0)
284#else
285# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
286 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
287#endif
288
289/**
290 * Call an opcode decoder function.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF.
294 */
295#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
296
297/**
298 * Call a common opcode decoder function taking one extra argument.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_1.
302 */
303#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
304
305/**
306 * Call a common opcode decoder function taking two extra arguments.
307 *
308 * We're using macros for this so that adding and removing parameters can be
309 * done as we please. See FNIEMOP_DEF_2.
310 */
311#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
312
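/*
 * Sketch (illustrative only, handler names made up) of how the FNIEMOP_DEF
 * and FNIEMOP_CALL families are used together. Because both the definition
 * and the call sites go through these macros, the hidden pVCpu parameter (and
 * any future additions) only has to be spelled out in one place.
 * IEM_OPCODE_GET_NEXT_U8 is defined further down in this file.
 */
#if 0 /* illustration only */
FNIEMOP_DEF_1(iemOpCommonExample, uint8_t, bRm)
{
    RT_NOREF(bRm);
    return VINF_SUCCESS;
}

FNIEMOP_DEF(iemOp_Example)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    return FNIEMOP_CALL_1(iemOpCommonExample, bRm);
}
#endif
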
313/**
314 * Check if we're currently executing in real or virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The IEM state of the current CPU.
318 */
319#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in virtual 8086 mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in long mode.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored if not 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
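/* Worked example (added for illustration): with a stored VEX.VVVV value of
   0xd (1101b) the macro above yields register 13 in 64-bit mode, but 5 (101b)
   elsewhere, since the 4th bit is masked off outside 64-bit code. */
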
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
389
390#ifdef VBOX_WITH_NESTED_HWVIRT
391/**
392 * Check the common SVM instruction preconditions.
393 */
394# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
395 do { \
396 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
397 { \
398 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
399 return iemRaiseUndefinedOpcode(pVCpu); \
400 } \
401 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
402 { \
403 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
404 return iemRaiseUndefinedOpcode(pVCpu); \
405 } \
406 if (pVCpu->iem.s.uCpl != 0) \
407 { \
408 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
409 return iemRaiseGeneralProtectionFault0(pVCpu); \
410 } \
411 } while (0)
412
413/**
414 * Check if SVM is enabled.
415 */
416# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
417
418/**
419 * Check if an SVM control/instruction intercept is set.
420 */
421# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
422
423/**
424 * Check if an SVM read CRx intercept is set.
425 */
426# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
427
428/**
429 * Check if an SVM write CRx intercept is set.
430 */
431# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
432
433/**
434 * Check if an SVM read DRx intercept is set.
435 */
436# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
437
438/**
439 * Check if an SVM write DRx intercept is set.
440 */
441# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
442
443/**
444 * Check if an SVM exception intercept is set.
445 */
446# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
447
448/**
449 * Invokes the SVM \#VMEXIT handler for the nested-guest.
450 */
451# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
452 do \
453 { \
454 return iemSvmVmexit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
455 } while (0)
456
457/**
458 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
459 * corresponding decode assist information.
460 */
461# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
462 do \
463 { \
464 uint64_t uExitInfo1; \
465 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
466 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
467 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
468 else \
469 uExitInfo1 = 0; \
470 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
471 } while (0)
472
473#else
474# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
475# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
476# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
477# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
478# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
479# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
480# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
481# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
482# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
483# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
484
485#endif /* VBOX_WITH_NESTED_HWVIRT */
486
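/*
 * Illustrative sketch (not part of the original sources) of how an SVM
 * instruction handler is expected to combine the helpers above: the common
 * precondition checks first, then the instruction-specific intercept test.
 * The instruction (VMLOAD) and exit code are examples only.
 */
#if 0 /* illustration only */
    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
#endif
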
487
488/*********************************************************************************************************************************
489* Global Variables *
490*********************************************************************************************************************************/
491extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
492
493
494/** Function table for the ADD instruction. */
495IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
496{
497 iemAImpl_add_u8, iemAImpl_add_u8_locked,
498 iemAImpl_add_u16, iemAImpl_add_u16_locked,
499 iemAImpl_add_u32, iemAImpl_add_u32_locked,
500 iemAImpl_add_u64, iemAImpl_add_u64_locked
501};
502
503/** Function table for the ADC instruction. */
504IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
505{
506 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
507 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
508 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
509 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
510};
511
512/** Function table for the SUB instruction. */
513IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
514{
515 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
516 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
517 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
518 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
519};
520
521/** Function table for the SBB instruction. */
522IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
523{
524 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
525 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
526 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
527 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
528};
529
530/** Function table for the OR instruction. */
531IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
532{
533 iemAImpl_or_u8, iemAImpl_or_u8_locked,
534 iemAImpl_or_u16, iemAImpl_or_u16_locked,
535 iemAImpl_or_u32, iemAImpl_or_u32_locked,
536 iemAImpl_or_u64, iemAImpl_or_u64_locked
537};
538
539/** Function table for the XOR instruction. */
540IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
541{
542 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
543 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
544 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
545 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
546};
547
548/** Function table for the AND instruction. */
549IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
550{
551 iemAImpl_and_u8, iemAImpl_and_u8_locked,
552 iemAImpl_and_u16, iemAImpl_and_u16_locked,
553 iemAImpl_and_u32, iemAImpl_and_u32_locked,
554 iemAImpl_and_u64, iemAImpl_and_u64_locked
555};
556
557/** Function table for the CMP instruction.
558 * @remarks Making operand order ASSUMPTIONS.
559 */
560IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
561{
562 iemAImpl_cmp_u8, NULL,
563 iemAImpl_cmp_u16, NULL,
564 iemAImpl_cmp_u32, NULL,
565 iemAImpl_cmp_u64, NULL
566};
567
568/** Function table for the TEST instruction.
569 * @remarks Making operand order ASSUMPTIONS.
570 */
571IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
572{
573 iemAImpl_test_u8, NULL,
574 iemAImpl_test_u16, NULL,
575 iemAImpl_test_u32, NULL,
576 iemAImpl_test_u64, NULL
577};
578
579/** Function table for the BT instruction. */
580IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
581{
582 NULL, NULL,
583 iemAImpl_bt_u16, NULL,
584 iemAImpl_bt_u32, NULL,
585 iemAImpl_bt_u64, NULL
586};
587
588/** Function table for the BTC instruction. */
589IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
590{
591 NULL, NULL,
592 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
593 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
594 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
595};
596
597/** Function table for the BTR instruction. */
598IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
599{
600 NULL, NULL,
601 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
602 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
603 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
604};
605
606/** Function table for the BTS instruction. */
607IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
608{
609 NULL, NULL,
610 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
611 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
612 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
613};
614
615/** Function table for the BSF instruction. */
616IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
617{
618 NULL, NULL,
619 iemAImpl_bsf_u16, NULL,
620 iemAImpl_bsf_u32, NULL,
621 iemAImpl_bsf_u64, NULL
622};
623
624/** Function table for the BSR instruction. */
625IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
626{
627 NULL, NULL,
628 iemAImpl_bsr_u16, NULL,
629 iemAImpl_bsr_u32, NULL,
630 iemAImpl_bsr_u64, NULL
631};
632
633/** Function table for the IMUL instruction. */
634IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
635{
636 NULL, NULL,
637 iemAImpl_imul_two_u16, NULL,
638 iemAImpl_imul_two_u32, NULL,
639 iemAImpl_imul_two_u64, NULL
640};
641
642/** Group 1 /r lookup table. */
643IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
644{
645 &g_iemAImpl_add,
646 &g_iemAImpl_or,
647 &g_iemAImpl_adc,
648 &g_iemAImpl_sbb,
649 &g_iemAImpl_and,
650 &g_iemAImpl_sub,
651 &g_iemAImpl_xor,
652 &g_iemAImpl_cmp
653};
654
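/*
 * Sketch (added for illustration): the group 1 table above is indexed by the
 * 'reg' field (bits 3-5) of the ModR/M byte, so /0 picks ADD, /1 OR, ... /7
 * CMP, matching the initializer order.
 */
#if 0 /* illustration only */
    PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
#endif
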
655/** Function table for the INC instruction. */
656IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
657{
658 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
659 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
660 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
661 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
662};
663
664/** Function table for the DEC instruction. */
665IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
666{
667 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
668 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
669 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
670 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
671};
672
673/** Function table for the NEG instruction. */
674IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
675{
676 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
677 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
678 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
679 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
680};
681
682/** Function table for the NOT instruction. */
683IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
684{
685 iemAImpl_not_u8, iemAImpl_not_u8_locked,
686 iemAImpl_not_u16, iemAImpl_not_u16_locked,
687 iemAImpl_not_u32, iemAImpl_not_u32_locked,
688 iemAImpl_not_u64, iemAImpl_not_u64_locked
689};
690
691
692/** Function table for the ROL instruction. */
693IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
694{
695 iemAImpl_rol_u8,
696 iemAImpl_rol_u16,
697 iemAImpl_rol_u32,
698 iemAImpl_rol_u64
699};
700
701/** Function table for the ROR instruction. */
702IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
703{
704 iemAImpl_ror_u8,
705 iemAImpl_ror_u16,
706 iemAImpl_ror_u32,
707 iemAImpl_ror_u64
708};
709
710/** Function table for the RCL instruction. */
711IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
712{
713 iemAImpl_rcl_u8,
714 iemAImpl_rcl_u16,
715 iemAImpl_rcl_u32,
716 iemAImpl_rcl_u64
717};
718
719/** Function table for the RCR instruction. */
720IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
721{
722 iemAImpl_rcr_u8,
723 iemAImpl_rcr_u16,
724 iemAImpl_rcr_u32,
725 iemAImpl_rcr_u64
726};
727
728/** Function table for the SHL instruction. */
729IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
730{
731 iemAImpl_shl_u8,
732 iemAImpl_shl_u16,
733 iemAImpl_shl_u32,
734 iemAImpl_shl_u64
735};
736
737/** Function table for the SHR instruction. */
738IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
739{
740 iemAImpl_shr_u8,
741 iemAImpl_shr_u16,
742 iemAImpl_shr_u32,
743 iemAImpl_shr_u64
744};
745
746/** Function table for the SAR instruction. */
747IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
748{
749 iemAImpl_sar_u8,
750 iemAImpl_sar_u16,
751 iemAImpl_sar_u32,
752 iemAImpl_sar_u64
753};
754
755
756/** Function table for the MUL instruction. */
757IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
758{
759 iemAImpl_mul_u8,
760 iemAImpl_mul_u16,
761 iemAImpl_mul_u32,
762 iemAImpl_mul_u64
763};
764
765/** Function table for the IMUL instruction working implicitly on rAX. */
766IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
767{
768 iemAImpl_imul_u8,
769 iemAImpl_imul_u16,
770 iemAImpl_imul_u32,
771 iemAImpl_imul_u64
772};
773
774/** Function table for the DIV instruction. */
775IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
776{
777 iemAImpl_div_u8,
778 iemAImpl_div_u16,
779 iemAImpl_div_u32,
780 iemAImpl_div_u64
781};
782
783/** Function table for the IDIV instruction. */
784IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
785{
786 iemAImpl_idiv_u8,
787 iemAImpl_idiv_u16,
788 iemAImpl_idiv_u32,
789 iemAImpl_idiv_u64
790};
791
792/** Function table for the SHLD instruction */
793IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
794{
795 iemAImpl_shld_u16,
796 iemAImpl_shld_u32,
797 iemAImpl_shld_u64,
798};
799
800/** Function table for the SHRD instruction */
801IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
802{
803 iemAImpl_shrd_u16,
804 iemAImpl_shrd_u32,
805 iemAImpl_shrd_u64,
806};
807
808
809/** Function table for the PUNPCKLBW instruction */
810IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
811/** Function table for the PUNPCKLBD instruction */
812IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
813/** Function table for the PUNPCKLDQ instruction */
814IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
815/** Function table for the PUNPCKLQDQ instruction */
816IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
817
818/** Function table for the PUNPCKHBW instruction */
819IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
820/** Function table for the PUNPCKHBD instruction */
821IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
822/** Function table for the PUNPCKHDQ instruction */
823IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
824/** Function table for the PUNPCKHQDQ instruction */
825IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
826
827/** Function table for the PXOR instruction */
828IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
829/** Function table for the PCMPEQB instruction */
830IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
831/** Function table for the PCMPEQW instruction */
832IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
833/** Function table for the PCMPEQD instruction */
834IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
835
836
837#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
838/** What IEM just wrote. */
839uint8_t g_abIemWrote[256];
840/** How much IEM just wrote. */
841size_t g_cbIemWrote;
842#endif
843
844
845/*********************************************************************************************************************************
846* Internal Functions *
847*********************************************************************************************************************************/
848IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
849IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
850IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
851IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
852/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
853IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
854IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
855IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
856IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
857IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
858IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
859IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
860IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
861IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
862IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
863IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
864IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
865#ifdef IEM_WITH_SETJMP
866DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
867DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
868DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
869DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
870DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
871#endif
872
873IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
874IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
875IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
876IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
877IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
878IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
879IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
880IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
881IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
882IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
883IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
884IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
885IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
886IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
887IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
888IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
889
890#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
891IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
892#endif
893IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
894IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
895
896#ifdef VBOX_WITH_NESTED_HWVIRT
897IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
898 uint64_t uExitInfo2);
899IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
900 uint32_t uErr, uint64_t uCr2);
901#endif
902
903/**
904 * Sets the pass up status.
905 *
906 * @returns VINF_SUCCESS.
907 * @param pVCpu The cross context virtual CPU structure of the
908 * calling thread.
909 * @param rcPassUp The pass up status. Must be informational.
910 * VINF_SUCCESS is not allowed.
911 */
912IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
913{
914 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
915
916 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
917 if (rcOldPassUp == VINF_SUCCESS)
918 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
919 /* If both are EM scheduling codes, use EM priority rules. */
920 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
921 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
922 {
923 if (rcPassUp < rcOldPassUp)
924 {
925 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
926 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
927 }
928 else
929 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
930 }
931 /* Override EM scheduling with specific status code. */
932 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
933 {
934 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
935 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
936 }
937 /* Don't override specific status code, first come first served. */
938 else
939 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
940 return VINF_SUCCESS;
941}
942
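/*
 * Typical use (sketch, mirroring iemInitDecoderAndPrefetchOpcodes further
 * down): an informational status from a physical read/write is remembered as
 * the pass-up status and execution continues as if the access had succeeded.
 */
#if 0 /* illustration only */
    if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
#endif
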
943
944/**
945 * Calculates the CPU mode.
946 *
947 * This is mainly for updating IEMCPU::enmCpuMode.
948 *
949 * @returns CPU mode.
950 * @param pCtx The register context for the CPU.
951 */
952DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
953{
954 if (CPUMIsGuestIn64BitCodeEx(pCtx))
955 return IEMMODE_64BIT;
956 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
957 return IEMMODE_32BIT;
958 return IEMMODE_16BIT;
959}
960
961
962/**
963 * Initializes the execution state.
964 *
965 * @param pVCpu The cross context virtual CPU structure of the
966 * calling thread.
967 * @param fBypassHandlers Whether to bypass access handlers.
968 *
969 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
970 * side-effects in strict builds.
971 */
972DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
973{
974 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
975
976 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
977
978#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
979 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
980 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
981 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
982 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
983 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
984 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
985 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
986 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
987#endif
988
989#ifdef VBOX_WITH_RAW_MODE_NOT_R0
990 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
991#endif
992 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
993 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
994#ifdef VBOX_STRICT
995 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
996 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
997 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
998 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
999 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1000 pVCpu->iem.s.uRexReg = 127;
1001 pVCpu->iem.s.uRexB = 127;
1002 pVCpu->iem.s.uRexIndex = 127;
1003 pVCpu->iem.s.iEffSeg = 127;
1004 pVCpu->iem.s.idxPrefix = 127;
1005 pVCpu->iem.s.uVex3rdReg = 127;
1006 pVCpu->iem.s.uVexLength = 127;
1007 pVCpu->iem.s.fEvexStuff = 127;
1008 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1009# ifdef IEM_WITH_CODE_TLB
1010 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1011 pVCpu->iem.s.pbInstrBuf = NULL;
1012 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1013 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1014 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1015 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1016# else
1017 pVCpu->iem.s.offOpcode = 127;
1018 pVCpu->iem.s.cbOpcode = 127;
1019# endif
1020#endif
1021
1022 pVCpu->iem.s.cActiveMappings = 0;
1023 pVCpu->iem.s.iNextMapping = 0;
1024 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1025 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1026#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1027 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1028 && pCtx->cs.u64Base == 0
1029 && pCtx->cs.u32Limit == UINT32_MAX
1030 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1031 if (!pVCpu->iem.s.fInPatchCode)
1032 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1033#endif
1034
1035#ifdef IEM_VERIFICATION_MODE_FULL
1036 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1037 pVCpu->iem.s.fNoRem = true;
1038#endif
1039}
1040
1041#ifdef VBOX_WITH_NESTED_HWVIRT
1042/**
1043 * Performs a minimal reinitialization of the execution state.
1044 *
1045 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1046 * 'world-switch' type operations on the CPU. Currently only nested
1047 * hardware-virtualization uses it.
1048 *
1049 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1050 */
1051IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1052{
1053 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1054 IEMMODE const enmMode = iemCalcCpuMode(pCtx);
1055 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1056
1057 pVCpu->iem.s.uCpl = uCpl;
1058 pVCpu->iem.s.enmCpuMode = enmMode;
1059 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1060 pVCpu->iem.s.enmEffAddrMode = enmMode;
1061 if (enmMode != IEMMODE_64BIT)
1062 {
1063 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1064 pVCpu->iem.s.enmEffOpSize = enmMode;
1065 }
1066 else
1067 {
1068 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1069 pVCpu->iem.s.enmEffOpSize = enmMode;
1070 }
1071 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1072#ifndef IEM_WITH_CODE_TLB
1073 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1074 pVCpu->iem.s.offOpcode = 0;
1075 pVCpu->iem.s.cbOpcode = 0;
1076#endif
1077}
1078#endif
1079
1080/**
1081 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1082 *
1083 * @param pVCpu The cross context virtual CPU structure of the
1084 * calling thread.
1085 */
1086DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1087{
1088 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1089#ifdef IEM_VERIFICATION_MODE_FULL
1090 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1091#endif
1092#ifdef VBOX_STRICT
1093# ifdef IEM_WITH_CODE_TLB
1094 NOREF(pVCpu);
1095# else
1096 pVCpu->iem.s.cbOpcode = 0;
1097# endif
1098#else
1099 NOREF(pVCpu);
1100#endif
1101}
1102
1103
1104/**
1105 * Initializes the decoder state.
1106 *
1107 * iemReInitDecoder is mostly a copy of this function.
1108 *
1109 * @param pVCpu The cross context virtual CPU structure of the
1110 * calling thread.
1111 * @param fBypassHandlers Whether to bypass access handlers.
1112 */
1113DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1114{
1115 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1116
1117 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1118
1119#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1120 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1122 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1123 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1124 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1125 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1126 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1127 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1128#endif
1129
1130#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1131 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1132#endif
1133 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1134#ifdef IEM_VERIFICATION_MODE_FULL
1135 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1136 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1137#endif
1138 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1139 pVCpu->iem.s.enmCpuMode = enmMode;
1140 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1141 pVCpu->iem.s.enmEffAddrMode = enmMode;
1142 if (enmMode != IEMMODE_64BIT)
1143 {
1144 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1145 pVCpu->iem.s.enmEffOpSize = enmMode;
1146 }
1147 else
1148 {
1149 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1150 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1151 }
1152 pVCpu->iem.s.fPrefixes = 0;
1153 pVCpu->iem.s.uRexReg = 0;
1154 pVCpu->iem.s.uRexB = 0;
1155 pVCpu->iem.s.uRexIndex = 0;
1156 pVCpu->iem.s.idxPrefix = 0;
1157 pVCpu->iem.s.uVex3rdReg = 0;
1158 pVCpu->iem.s.uVexLength = 0;
1159 pVCpu->iem.s.fEvexStuff = 0;
1160 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1161#ifdef IEM_WITH_CODE_TLB
1162 pVCpu->iem.s.pbInstrBuf = NULL;
1163 pVCpu->iem.s.offInstrNextByte = 0;
1164 pVCpu->iem.s.offCurInstrStart = 0;
1165# ifdef VBOX_STRICT
1166 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1167 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1168 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1169# endif
1170#else
1171 pVCpu->iem.s.offOpcode = 0;
1172 pVCpu->iem.s.cbOpcode = 0;
1173#endif
1174 pVCpu->iem.s.cActiveMappings = 0;
1175 pVCpu->iem.s.iNextMapping = 0;
1176 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1177 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1178#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1179 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1180 && pCtx->cs.u64Base == 0
1181 && pCtx->cs.u32Limit == UINT32_MAX
1182 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1183 if (!pVCpu->iem.s.fInPatchCode)
1184 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1185#endif
1186
1187#ifdef DBGFTRACE_ENABLED
1188 switch (enmMode)
1189 {
1190 case IEMMODE_64BIT:
1191 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1192 break;
1193 case IEMMODE_32BIT:
1194 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1195 break;
1196 case IEMMODE_16BIT:
1197 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1198 break;
1199 }
1200#endif
1201}
1202
1203
1204/**
1205 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1206 *
1207 * This is mostly a copy of iemInitDecoder.
1208 *
1209 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1210 */
1211DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1212{
1213 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1214
1215 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1216
1217#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1218 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1219 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1220 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1221 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1222 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1223 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1224 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1225 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1226#endif
1227
1228 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1229#ifdef IEM_VERIFICATION_MODE_FULL
1230 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1231 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1232#endif
1233 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1234 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1235 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1236 pVCpu->iem.s.enmEffAddrMode = enmMode;
1237 if (enmMode != IEMMODE_64BIT)
1238 {
1239 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1240 pVCpu->iem.s.enmEffOpSize = enmMode;
1241 }
1242 else
1243 {
1244 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1245 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1246 }
1247 pVCpu->iem.s.fPrefixes = 0;
1248 pVCpu->iem.s.uRexReg = 0;
1249 pVCpu->iem.s.uRexB = 0;
1250 pVCpu->iem.s.uRexIndex = 0;
1251 pVCpu->iem.s.idxPrefix = 0;
1252 pVCpu->iem.s.uVex3rdReg = 0;
1253 pVCpu->iem.s.uVexLength = 0;
1254 pVCpu->iem.s.fEvexStuff = 0;
1255 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1256#ifdef IEM_WITH_CODE_TLB
1257 if (pVCpu->iem.s.pbInstrBuf)
1258 {
1259 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1260 - pVCpu->iem.s.uInstrBufPc;
1261 if (off < pVCpu->iem.s.cbInstrBufTotal)
1262 {
1263 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1264 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1265 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1266 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1267 else
1268 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1269 }
1270 else
1271 {
1272 pVCpu->iem.s.pbInstrBuf = NULL;
1273 pVCpu->iem.s.offInstrNextByte = 0;
1274 pVCpu->iem.s.offCurInstrStart = 0;
1275 pVCpu->iem.s.cbInstrBuf = 0;
1276 pVCpu->iem.s.cbInstrBufTotal = 0;
1277 }
1278 }
1279 else
1280 {
1281 pVCpu->iem.s.offInstrNextByte = 0;
1282 pVCpu->iem.s.offCurInstrStart = 0;
1283 pVCpu->iem.s.cbInstrBuf = 0;
1284 pVCpu->iem.s.cbInstrBufTotal = 0;
1285 }
1286#else
1287 pVCpu->iem.s.cbOpcode = 0;
1288 pVCpu->iem.s.offOpcode = 0;
1289#endif
1290 Assert(pVCpu->iem.s.cActiveMappings == 0);
1291 pVCpu->iem.s.iNextMapping = 0;
1292 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1293 Assert(pVCpu->iem.s.fBypassHandlers == false);
1294#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1295 if (!pVCpu->iem.s.fInPatchCode)
1296 { /* likely */ }
1297 else
1298 {
1299 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1300 && pCtx->cs.u64Base == 0
1301 && pCtx->cs.u32Limit == UINT32_MAX
1302 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1303 if (!pVCpu->iem.s.fInPatchCode)
1304 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1305 }
1306#endif
1307
1308#ifdef DBGFTRACE_ENABLED
1309 switch (enmMode)
1310 {
1311 case IEMMODE_64BIT:
1312 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1313 break;
1314 case IEMMODE_32BIT:
1315 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1316 break;
1317 case IEMMODE_16BIT:
1318 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1319 break;
1320 }
1321#endif
1322}
1323
1324
1325
1326/**
1327 * Prefetch opcodes the first time when starting execution.
1328 *
1329 * @returns Strict VBox status code.
1330 * @param pVCpu The cross context virtual CPU structure of the
1331 * calling thread.
1332 * @param fBypassHandlers Whether to bypass access handlers.
1333 */
1334IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1335{
1336#ifdef IEM_VERIFICATION_MODE_FULL
1337 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1338#endif
1339 iemInitDecoder(pVCpu, fBypassHandlers);
1340
1341#ifdef IEM_WITH_CODE_TLB
1342 /** @todo Do ITLB lookup here. */
1343
1344#else /* !IEM_WITH_CODE_TLB */
1345
1346 /*
1347 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1348 *
1349 * First translate CS:rIP to a physical address.
1350 */
1351 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1352 uint32_t cbToTryRead;
1353 RTGCPTR GCPtrPC;
1354 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1355 {
1356 cbToTryRead = PAGE_SIZE;
1357 GCPtrPC = pCtx->rip;
1358 if (IEM_IS_CANONICAL(GCPtrPC))
1359 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1360 else
1361 return iemRaiseGeneralProtectionFault0(pVCpu);
1362 }
1363 else
1364 {
1365 uint32_t GCPtrPC32 = pCtx->eip;
1366 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1367 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1368 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1369 else
1370 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1371 if (cbToTryRead) { /* likely */ }
1372 else /* overflowed */
1373 {
1374 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1375 cbToTryRead = UINT32_MAX;
1376 }
1377 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1378 Assert(GCPtrPC <= UINT32_MAX);
1379 }
1380
1381# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1382 /* Allow interpretation of patch manager code blocks since they can for
1383 instance throw #PFs for perfectly good reasons. */
1384 if (pVCpu->iem.s.fInPatchCode)
1385 {
1386 size_t cbRead = 0;
1387 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1388 AssertRCReturn(rc, rc);
1389 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1390 return VINF_SUCCESS;
1391 }
1392# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1393
1394 RTGCPHYS GCPhys;
1395 uint64_t fFlags;
1396 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1397 if (RT_SUCCESS(rc)) { /* probable */ }
1398 else
1399 {
1400 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1401 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1402 }
1403 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1404 else
1405 {
1406 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1407 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1408 }
1409 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1410 else
1411 {
1412 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1413 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1414 }
1415 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1416 /** @todo Check reserved bits and such stuff. PGM is better at doing
1417 * that, so do it when implementing the guest virtual address
1418 * TLB... */
1419
1420# ifdef IEM_VERIFICATION_MODE_FULL
1421 /*
1422 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1423 * instruction.
1424 */
1425 /** @todo optimize this differently by not using PGMPhysRead. */
1426 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1427 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1428 if ( offPrevOpcodes < cbOldOpcodes
1429 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1430 {
1431 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1432 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1433 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1434 pVCpu->iem.s.cbOpcode = cbNew;
1435 return VINF_SUCCESS;
1436 }
1437# endif
1438
1439 /*
1440 * Read the bytes at this address.
1441 */
1442 PVM pVM = pVCpu->CTX_SUFF(pVM);
1443# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1444 size_t cbActual;
1445 if ( PATMIsEnabled(pVM)
1446 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1447 {
1448 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1449 Assert(cbActual > 0);
1450 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1451 }
1452 else
1453# endif
1454 {
1455 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1456 if (cbToTryRead > cbLeftOnPage)
1457 cbToTryRead = cbLeftOnPage;
1458 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1459 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1460
1461 if (!pVCpu->iem.s.fBypassHandlers)
1462 {
1463 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1464 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1465 { /* likely */ }
1466 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1467 {
1468 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1469 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1470 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1471 }
1472 else
1473 {
1474 Log((RT_SUCCESS(rcStrict)
1475 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1476 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1477 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1478 return rcStrict;
1479 }
1480 }
1481 else
1482 {
1483 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1484 if (RT_SUCCESS(rc))
1485 { /* likely */ }
1486 else
1487 {
1488 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1489                     GCPtrPC, GCPhys, cbToTryRead, rc));
1490 return rc;
1491 }
1492 }
1493 pVCpu->iem.s.cbOpcode = cbToTryRead;
1494 }
1495#endif /* !IEM_WITH_CODE_TLB */
1496 return VINF_SUCCESS;
1497}
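
/*
 * Worked example of the prefetch clamping above (illustrative only, assuming
 * the usual 4 KiB page size): for a GCPtrPC whose page offset is 0xffa only
 * 6 bytes remain on the page, so at most 6 opcode bytes are buffered here and
 * anything the decoder needs beyond that is fetched later by
 * iemOpcodeFetchMoreBytes (or via the code TLB path when IEM_WITH_CODE_TLB is
 * defined).  Condensed restatement of the two clamps:
 *
 *      cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);    // 0x1000 - 0xffa = 6
 *      cbToTryRead  = RT_MIN(RT_MIN(cbToTryRead, cbLeftOnPage), sizeof(pVCpu->iem.s.abOpcode));
 */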
1498
1499
1500/**
1501 * Invalidates the IEM TLBs.
1502 *
1503 * This is called internally as well as by PGM when moving GC mappings.
1504 *
1506 * @param pVCpu The cross context virtual CPU structure of the calling
1507 * thread.
1508 * @param fVmm Set when PGM calls us with a remapping.
1509 */
1510VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1511{
1512#ifdef IEM_WITH_CODE_TLB
1513 pVCpu->iem.s.cbInstrBufTotal = 0;
1514 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1515 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1516 { /* very likely */ }
1517 else
1518 {
1519 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1520 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1521 while (i-- > 0)
1522 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1523 }
1524#endif
1525
1526#ifdef IEM_WITH_DATA_TLB
1527 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1528 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1529 { /* very likely */ }
1530 else
1531 {
1532 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1533 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1534 while (i-- > 0)
1535 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1536 }
1537#endif
1538 NOREF(pVCpu); NOREF(fVmm);
1539}
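
/*
 * How the revision bump above invalidates everything lazily (illustrative
 * restatement of the scheme used by the lookup code further down): each TLB
 * tag is the page number OR'ed with the revision that was current when the
 * entry was inserted, so adding IEMTLB_REVISION_INCR makes every stored tag
 * mismatch freshly computed ones without touching the 256 entries; only on
 * the rare revision wrap-around (handled above) do the tags get scrubbed.
 *
 *      uint64_t const uTag = (GCPtr >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
 *      if (pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag].uTag == uTag)
 *          // hit: the entry was created under the current revision
 */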
1540
1541
1542/**
1543 * Invalidates a page in the TLBs.
1544 *
1545 * @param pVCpu The cross context virtual CPU structure of the calling
1546 * thread.
1547 * @param GCPtr The address of the page to invalidate.
1548 */
1549VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1550{
1551#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1552 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1553 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1554 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1555 uintptr_t idx = (uint8_t)GCPtr;
1556
1557# ifdef IEM_WITH_CODE_TLB
1558 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1559 {
1560 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1561 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1562 pVCpu->iem.s.cbInstrBufTotal = 0;
1563 }
1564# endif
1565
1566# ifdef IEM_WITH_DATA_TLB
1567 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1568 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1569# endif
1570#else
1571 NOREF(pVCpu); NOREF(GCPtr);
1572#endif
1573}
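
/*
 * Illustrative example of the direct-mapped lookup above: both TLBs have 256
 * entries and a guest page always lands in the slot selected by the low 8
 * bits of its page number, so invalidating a single page is one tag compare
 * per TLB.  E.g. for GCPtr = 0x00007fffb1234567:
 *
 *      page number : GCPtr >> X86_PAGE_SHIFT  = 0x7fffb1234
 *      slot index  : (uint8_t)0x7fffb1234     = 0x34
 *      tag checked : 0x7fffb1234 | uTlbRevision
 */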
1574
1575
1576/**
1577 * Invalidates the host physical aspects of the IEM TLBs.
1578 *
1579 * This is called internally as well as by PGM when moving GC mappings.
1580 *
1581 * @param pVCpu The cross context virtual CPU structure of the calling
1582 * thread.
1583 */
1584VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1585{
1586#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1587 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1588
1589# ifdef IEM_WITH_CODE_TLB
1590 pVCpu->iem.s.cbInstrBufTotal = 0;
1591# endif
1592 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1593 if (uTlbPhysRev != 0)
1594 {
1595 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1596 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1597 }
1598 else
1599 {
1600 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1601 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1602
1603 unsigned i;
1604# ifdef IEM_WITH_CODE_TLB
1605 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1606 while (i-- > 0)
1607 {
1608 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1609 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1610 }
1611# endif
1612# ifdef IEM_WITH_DATA_TLB
1613 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1614 while (i-- > 0)
1615 {
1616 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1617 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1618 }
1619# endif
1620 }
1621#else
1622 NOREF(pVCpu);
1623#endif
1624}
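
/*
 * Illustrative note on the physical revision bumped above: the
 * IEMTLBE_F_PHYS_REV portion of each entry's fFlagsAndPhysRev records the
 * physical revision the host mapping was resolved under, so after the bump
 * the check used by the opcode fetch code below fails and the entry has to
 * re-query PGM before its GCPhys/pbMappingR3 information is trusted again:
 *
 *      if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
 *          // host-physical info still valid
 *      else
 *          // refresh via PGMPhysIemGCPhys2PtrNoLock()
 */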
1625
1626
1627/**
1628 * Invalidates the host physical aspects of the IEM TLBs.
1629 *
1630 * This is called internally as well as by PGM when moving GC mappings.
1631 *
1632 * @param pVM The cross context VM structure.
1633 *
1634 * @remarks Caller holds the PGM lock.
1635 */
1636VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1637{
1638 RT_NOREF_PV(pVM);
1639}
1640
1641#ifdef IEM_WITH_CODE_TLB
1642
1643/**
1644 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1645 * longjmping on failure.
1646 *
1647 * We end up here for a number of reasons:
1648 * - pbInstrBuf isn't yet initialized.
1649 * - Advancing beyond the buffer boundary (e.g. cross page).
1650 * - Advancing beyond the CS segment limit.
1651 * - Fetching from non-mappable page (e.g. MMIO).
1652 *
1653 * @param pVCpu The cross context virtual CPU structure of the
1654 * calling thread.
1655 * @param pvDst Where to return the bytes.
1656 * @param cbDst Number of bytes to read.
1657 *
1658 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1659 */
1660IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1661{
1662#ifdef IN_RING3
1663//__debugbreak();
1664 for (;;)
1665 {
1666 Assert(cbDst <= 8);
1667 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1668
1669 /*
1670 * We might have a partial buffer match, deal with that first to make the
1671 * rest simpler. This is the first part of the cross page/buffer case.
1672 */
1673 if (pVCpu->iem.s.pbInstrBuf != NULL)
1674 {
1675 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1676 {
1677 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1678 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1679 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1680
1681 cbDst -= cbCopy;
1682 pvDst = (uint8_t *)pvDst + cbCopy;
1683 offBuf += cbCopy;
1684                pVCpu->iem.s.offInstrNextByte += cbCopy;
1685 }
1686 }
1687
1688 /*
1689 * Check segment limit, figuring how much we're allowed to access at this point.
1690 *
1691 * We will fault immediately if RIP is past the segment limit / in non-canonical
1692 * territory. If we do continue, there are one or more bytes to read before we
1693 * end up in trouble and we need to do that first before faulting.
1694 */
1695 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1696 RTGCPTR GCPtrFirst;
1697 uint32_t cbMaxRead;
1698 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1699 {
1700 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1701 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1702 { /* likely */ }
1703 else
1704 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1705 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1706 }
1707 else
1708 {
1709 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1710 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1711 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1712 { /* likely */ }
1713 else
1714 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1715 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1716 if (cbMaxRead != 0)
1717 { /* likely */ }
1718 else
1719 {
1720 /* Overflowed because address is 0 and limit is max. */
1721 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1722 cbMaxRead = X86_PAGE_SIZE;
1723 }
1724 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1725 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1726 if (cbMaxRead2 < cbMaxRead)
1727 cbMaxRead = cbMaxRead2;
1728 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1729 }
1730
1731 /*
1732 * Get the TLB entry for this piece of code.
1733 */
1734 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1735 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1736 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1737 if (pTlbe->uTag == uTag)
1738 {
1739 /* likely when executing lots of code, otherwise unlikely */
1740# ifdef VBOX_WITH_STATISTICS
1741 pVCpu->iem.s.CodeTlb.cTlbHits++;
1742# endif
1743 }
1744 else
1745 {
1746 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1747# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1748 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1749 {
1750 pTlbe->uTag = uTag;
1751                pTlbe->uTag = uTag;
1752                pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
                                            | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1753 pTlbe->GCPhys = NIL_RTGCPHYS;
1754 pTlbe->pbMappingR3 = NULL;
1755 }
1756 else
1757# endif
1758 {
1759 RTGCPHYS GCPhys;
1760 uint64_t fFlags;
1761 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1762 if (RT_FAILURE(rc))
1763 {
1764 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1765 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1766 }
1767
1768 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1769 pTlbe->uTag = uTag;
1770 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1771 pTlbe->GCPhys = GCPhys;
1772 pTlbe->pbMappingR3 = NULL;
1773 }
1774 }
1775
1776 /*
1777 * Check TLB page table level access flags.
1778 */
1779 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1780 {
1781 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1782 {
1783 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1784 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1785 }
1786 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1787 {
1788 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1789 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1790 }
1791 }
1792
1793# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1794 /*
1795 * Allow interpretation of patch manager code blocks since they can for
1796 * instance throw #PFs for perfectly good reasons.
1797 */
1798 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1799 { /* no unlikely */ }
1800 else
1801 {
1802            /** @todo Could optimize this a little in ring-3 if we liked. */
1803 size_t cbRead = 0;
1804 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1805 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1806 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1807 return;
1808 }
1809# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1810
1811 /*
1812 * Look up the physical page info if necessary.
1813 */
1814 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1815 { /* not necessary */ }
1816 else
1817 {
1818 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1819 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1820 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1821 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1822 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1823 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1824 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1825 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1826 }
1827
1828# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1829 /*
1830 * Try do a direct read using the pbMappingR3 pointer.
1831 */
1832 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1833 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1834 {
1835 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1836 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1837 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1838 {
1839 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1840 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1841 }
1842 else
1843 {
1844 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1845 Assert(cbInstr < cbMaxRead);
1846 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1847 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1848 }
1849 if (cbDst <= cbMaxRead)
1850 {
1851 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1852 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1853 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1854 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1855 return;
1856 }
1857 pVCpu->iem.s.pbInstrBuf = NULL;
1858
1859 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1860 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1861 }
1862 else
1863# endif
1864#if 0
1865 /*
1866     * If there is no special read handling, we can read a bit more and
1867 * put it in the prefetch buffer.
1868 */
1869 if ( cbDst < cbMaxRead
1870 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1871 {
1872 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1873 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1874 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1875 { /* likely */ }
1876 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1877 {
1878 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1879 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1880 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1881                AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1882 }
1883 else
1884 {
1885 Log((RT_SUCCESS(rcStrict)
1886 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1887 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1888 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1889 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1890 }
1891 }
1892 /*
1893 * Special read handling, so only read exactly what's needed.
1894 * This is a highly unlikely scenario.
1895 */
1896 else
1897#endif
1898 {
1899 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1900 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1901 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1902 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1903 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1904 { /* likely */ }
1905 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1906 {
1907 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1908                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1909 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1910 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1911 }
1912 else
1913 {
1914 Log((RT_SUCCESS(rcStrict)
1915 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1916 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1917                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1918 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1919 }
1920 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1921 if (cbToRead == cbDst)
1922 return;
1923 }
1924
1925 /*
1926 * More to read, loop.
1927 */
1928 cbDst -= cbMaxRead;
1929 pvDst = (uint8_t *)pvDst + cbMaxRead;
1930 }
1931#else
1932 RT_NOREF(pvDst, cbDst);
1933 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1934#endif
1935}
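
/*
 * Illustrative walk-through of the cross-page case handled by the loop above
 * (hypothetical numbers): cbDst = 4 immediate bytes are requested but only
 * cbMaxRead = 1 byte is left before the page boundary.  The first iteration
 * delivers that byte and falls through to the loop tail, and the second
 * iteration resolves a TLB entry for the following page and delivers the
 * remaining three bytes:
 *
 *      cbDst -= cbMaxRead;                     // 4 - 1 = 3 bytes still outstanding
 *      pvDst  = (uint8_t *)pvDst + cbMaxRead;  // advance the destination
 */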
1936
1937#else
1938
1939/**
1940 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1941 * exception if it fails.
1942 *
1943 * @returns Strict VBox status code.
1944 * @param pVCpu The cross context virtual CPU structure of the
1945 * calling thread.
1946 * @param cbMin The minimum number of bytes relative to offOpcode
1947 * that must be read.
1948 */
1949IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1950{
1951 /*
1952 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1953 *
1954 * First translate CS:rIP to a physical address.
1955 */
1956 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1957 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1958 uint32_t cbToTryRead;
1959 RTGCPTR GCPtrNext;
1960 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1961 {
1962 cbToTryRead = PAGE_SIZE;
1963 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1964 if (!IEM_IS_CANONICAL(GCPtrNext))
1965 return iemRaiseGeneralProtectionFault0(pVCpu);
1966 }
1967 else
1968 {
1969 uint32_t GCPtrNext32 = pCtx->eip;
1970 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1971 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1972 if (GCPtrNext32 > pCtx->cs.u32Limit)
1973 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1974 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1975 if (!cbToTryRead) /* overflowed */
1976 {
1977 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1978 cbToTryRead = UINT32_MAX;
1979 /** @todo check out wrapping around the code segment. */
1980 }
1981 if (cbToTryRead < cbMin - cbLeft)
1982 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1983 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1984 }
1985
1986 /* Only read up to the end of the page, and make sure we don't read more
1987 than the opcode buffer can hold. */
1988 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1989 if (cbToTryRead > cbLeftOnPage)
1990 cbToTryRead = cbLeftOnPage;
1991 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1992 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1993/** @todo r=bird: Convert assertion into undefined opcode exception? */
1994 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1995
1996# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1997 /* Allow interpretation of patch manager code blocks since they can for
1998 instance throw #PFs for perfectly good reasons. */
1999 if (pVCpu->iem.s.fInPatchCode)
2000 {
2001 size_t cbRead = 0;
2002 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2003 AssertRCReturn(rc, rc);
2004 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2005 return VINF_SUCCESS;
2006 }
2007# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2008
2009 RTGCPHYS GCPhys;
2010 uint64_t fFlags;
2011 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2012 if (RT_FAILURE(rc))
2013 {
2014 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2015 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2016 }
2017 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2018 {
2019 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2020 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2021 }
2022 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2023 {
2024 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2025 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2026 }
2027 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2028 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2029 /** @todo Check reserved bits and such stuff. PGM is better at doing
2030 * that, so do it when implementing the guest virtual address
2031 * TLB... */
2032
2033 /*
2034 * Read the bytes at this address.
2035 *
2036 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2037 * and since PATM should only patch the start of an instruction there
2038 * should be no need to check again here.
2039 */
2040 if (!pVCpu->iem.s.fBypassHandlers)
2041 {
2042 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2043 cbToTryRead, PGMACCESSORIGIN_IEM);
2044 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2045 { /* likely */ }
2046 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2047 {
2048 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2049                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2050 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2051 }
2052 else
2053 {
2054 Log((RT_SUCCESS(rcStrict)
2055 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2056 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2057                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2058 return rcStrict;
2059 }
2060 }
2061 else
2062 {
2063 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2064 if (RT_SUCCESS(rc))
2065 { /* likely */ }
2066 else
2067 {
2068 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2069 return rc;
2070 }
2071 }
2072 pVCpu->iem.s.cbOpcode += cbToTryRead;
2073 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2074
2075 return VINF_SUCCESS;
2076}
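
/*
 * Illustrative note on the "overflowed" special case above: with
 * cs.u32Limit = 0xffffffff and GCPtrNext32 = 0 the available byte count
 * wraps to zero even though the entire 4 GiB range is addressable, which is
 * what that branch patches up:
 *
 *      cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1
 *                  = 0xffffffff - 0 + 1 = 0 (32-bit wrap)  ->  forced to UINT32_MAX
 */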
2077
2078#endif /* !IEM_WITH_CODE_TLB */
2079#ifndef IEM_WITH_SETJMP
2080
2081/**
2082 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2083 *
2084 * @returns Strict VBox status code.
2085 * @param pVCpu The cross context virtual CPU structure of the
2086 * calling thread.
2087 * @param pb Where to return the opcode byte.
2088 */
2089DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2090{
2091 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2092 if (rcStrict == VINF_SUCCESS)
2093 {
2094 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2095 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2096 pVCpu->iem.s.offOpcode = offOpcode + 1;
2097 }
2098 else
2099 *pb = 0;
2100 return rcStrict;
2101}
2102
2103
2104/**
2105 * Fetches the next opcode byte.
2106 *
2107 * @returns Strict VBox status code.
2108 * @param pVCpu The cross context virtual CPU structure of the
2109 * calling thread.
2110 * @param pu8 Where to return the opcode byte.
2111 */
2112DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2113{
2114 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2115 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2116 {
2117 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2118 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2119 return VINF_SUCCESS;
2120 }
2121 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2122}
2123
2124#else /* IEM_WITH_SETJMP */
2125
2126/**
2127 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2128 *
2129 * @returns The opcode byte.
2130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2131 */
2132DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2133{
2134# ifdef IEM_WITH_CODE_TLB
2135 uint8_t u8;
2136 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2137 return u8;
2138# else
2139 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2140 if (rcStrict == VINF_SUCCESS)
2141 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2142 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2143# endif
2144}
2145
2146
2147/**
2148 * Fetches the next opcode byte, longjmp on error.
2149 *
2150 * @returns The opcode byte.
2151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2152 */
2153DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2154{
2155# ifdef IEM_WITH_CODE_TLB
2156 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2157 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2158 if (RT_LIKELY( pbBuf != NULL
2159 && offBuf < pVCpu->iem.s.cbInstrBuf))
2160 {
2161 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2162 return pbBuf[offBuf];
2163 }
2164# else
2165 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2166 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2167 {
2168 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2169 return pVCpu->iem.s.abOpcode[offOpcode];
2170 }
2171# endif
2172 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2173}
2174
2175#endif /* IEM_WITH_SETJMP */
2176
2177/**
2178 * Fetches the next opcode byte, returns automatically on failure.
2179 *
2180 * @param a_pu8 Where to return the opcode byte.
2181 * @remark Implicitly references pVCpu.
2182 */
2183#ifndef IEM_WITH_SETJMP
2184# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2185 do \
2186 { \
2187 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2188 if (rcStrict2 == VINF_SUCCESS) \
2189 { /* likely */ } \
2190 else \
2191 return rcStrict2; \
2192 } while (0)
2193#else
2194# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2195#endif /* IEM_WITH_SETJMP */
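
/*
 * Usage sketch for IEM_OPCODE_GET_NEXT_U8 (hypothetical decoder fragment, not
 * taken from this file): the macro hides the two error models so decoder code
 * reads linearly in both builds.  In the non-setjmp build it does a
 * 'return rcStrict2', so the enclosing function must return a strict status
 * (VBOXSTRICTRC); in the setjmp build a fetch failure longjmps past the
 * caller instead.
 *
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_U8(&bRm);
 *      // ... continue decoding using bRm ...
 */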
2196
2197
2198#ifndef IEM_WITH_SETJMP
2199/**
2200 * Fetches the next signed byte from the opcode stream.
2201 *
2202 * @returns Strict VBox status code.
2203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2204 * @param pi8 Where to return the signed byte.
2205 */
2206DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2207{
2208 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2209}
2210#endif /* !IEM_WITH_SETJMP */
2211
2212
2213/**
2214 * Fetches the next signed byte from the opcode stream, returning automatically
2215 * on failure.
2216 *
2217 * @param a_pi8 Where to return the signed byte.
2218 * @remark Implicitly references pVCpu.
2219 */
2220#ifndef IEM_WITH_SETJMP
2221# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2222 do \
2223 { \
2224 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2225 if (rcStrict2 != VINF_SUCCESS) \
2226 return rcStrict2; \
2227 } while (0)
2228#else /* IEM_WITH_SETJMP */
2229# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2230
2231#endif /* IEM_WITH_SETJMP */
2232
2233#ifndef IEM_WITH_SETJMP
2234
2235/**
2236 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2237 *
2238 * @returns Strict VBox status code.
2239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2240 * @param pu16 Where to return the opcode word.
2241 */
2242DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2243{
2244 uint8_t u8;
2245 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2246 if (rcStrict == VINF_SUCCESS)
2247 *pu16 = (int8_t)u8;
2248 return rcStrict;
2249}
2250
2251
2252/**
2253 * Fetches the next signed byte from the opcode stream, extending it to
2254 * unsigned 16-bit.
2255 *
2256 * @returns Strict VBox status code.
2257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2258 * @param pu16 Where to return the unsigned word.
2259 */
2260DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2261{
2262 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2263 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2264 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2265
2266 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2267 pVCpu->iem.s.offOpcode = offOpcode + 1;
2268 return VINF_SUCCESS;
2269}
2270
2271#endif /* !IEM_WITH_SETJMP */
2272
2273/**
2274 * Fetches the next signed byte from the opcode stream, sign-extending it to
2275 * a word, returning automatically on failure.
2276 *
2277 * @param a_pu16 Where to return the word.
2278 * @remark Implicitly references pVCpu.
2279 */
2280#ifndef IEM_WITH_SETJMP
2281# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2282 do \
2283 { \
2284 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2285 if (rcStrict2 != VINF_SUCCESS) \
2286 return rcStrict2; \
2287 } while (0)
2288#else
2289# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2290#endif
2291
2292#ifndef IEM_WITH_SETJMP
2293
2294/**
2295 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2296 *
2297 * @returns Strict VBox status code.
2298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2299 * @param pu32 Where to return the opcode dword.
2300 */
2301DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2302{
2303 uint8_t u8;
2304 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2305 if (rcStrict == VINF_SUCCESS)
2306 *pu32 = (int8_t)u8;
2307 return rcStrict;
2308}
2309
2310
2311/**
2312 * Fetches the next signed byte from the opcode stream, extending it to
2313 * unsigned 32-bit.
2314 *
2315 * @returns Strict VBox status code.
2316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2317 * @param pu32 Where to return the unsigned dword.
2318 */
2319DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2320{
2321 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2322 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2323 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2324
2325 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2326 pVCpu->iem.s.offOpcode = offOpcode + 1;
2327 return VINF_SUCCESS;
2328}
2329
2330#endif /* !IEM_WITH_SETJMP */
2331
2332/**
2333 * Fetches the next signed byte from the opcode stream, sign-extending it to
2334 * a double word, returning automatically on failure.
2335 *
2336 * @param a_pu32 Where to return the double word.
2337 * @remark Implicitly references pVCpu.
2338 */
2339#ifndef IEM_WITH_SETJMP
2340# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2341 do \
2342 { \
2343 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2344 if (rcStrict2 != VINF_SUCCESS) \
2345 return rcStrict2; \
2346 } while (0)
2347#else
2348# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2349#endif
2350
2351#ifndef IEM_WITH_SETJMP
2352
2353/**
2354 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2355 *
2356 * @returns Strict VBox status code.
2357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2358 * @param pu64 Where to return the opcode qword.
2359 */
2360DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2361{
2362 uint8_t u8;
2363 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2364 if (rcStrict == VINF_SUCCESS)
2365 *pu64 = (int8_t)u8;
2366 return rcStrict;
2367}
2368
2369
2370/**
2371 * Fetches the next signed byte from the opcode stream, extending it to
2372 * unsigned 64-bit.
2373 *
2374 * @returns Strict VBox status code.
2375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2376 * @param pu64 Where to return the unsigned qword.
2377 */
2378DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2379{
2380 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2381 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2382 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2383
2384 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2385 pVCpu->iem.s.offOpcode = offOpcode + 1;
2386 return VINF_SUCCESS;
2387}
2388
2389#endif /* !IEM_WITH_SETJMP */
2390
2391
2392/**
2393 * Fetches the next signed byte from the opcode stream, sign-extending it to
2394 * a quad word, returning automatically on failure.
2395 *
2396 * @param a_pu64 Where to return the quad word.
2397 * @remark Implicitly references pVCpu.
2398 */
2399#ifndef IEM_WITH_SETJMP
2400# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2401 do \
2402 { \
2403 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2404 if (rcStrict2 != VINF_SUCCESS) \
2405 return rcStrict2; \
2406 } while (0)
2407#else
2408# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2409#endif
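
/*
 * Worked example for the S8_SX_U16/U32/U64 fetchers above (illustrative): an
 * opcode byte of 0xf0 is -16 as int8_t, so the (int8_t) casts widen it to
 *
 *      0xfff0              as uint16_t,
 *      0xfffffff0          as uint32_t,
 *      0xfffffffffffffff0  as uint64_t,
 *
 * matching the usual x86 sign extension of 8-bit immediates and displacements.
 */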
2410
2411
2412#ifndef IEM_WITH_SETJMP
2413
2414/**
2415 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2416 *
2417 * @returns Strict VBox status code.
2418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2419 * @param pu16 Where to return the opcode word.
2420 */
2421DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2422{
2423 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2424 if (rcStrict == VINF_SUCCESS)
2425 {
2426 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2427# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2428 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2429# else
2430 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2431# endif
2432 pVCpu->iem.s.offOpcode = offOpcode + 2;
2433 }
2434 else
2435 *pu16 = 0;
2436 return rcStrict;
2437}
2438
2439
2440/**
2441 * Fetches the next opcode word.
2442 *
2443 * @returns Strict VBox status code.
2444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2445 * @param pu16 Where to return the opcode word.
2446 */
2447DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2448{
2449 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2450 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2451 {
2452 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2453# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2454 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2455# else
2456 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2457# endif
2458 return VINF_SUCCESS;
2459 }
2460 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2461}
2462
2463#else /* IEM_WITH_SETJMP */
2464
2465/**
2466 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2467 *
2468 * @returns The opcode word.
2469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2470 */
2471DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2472{
2473# ifdef IEM_WITH_CODE_TLB
2474 uint16_t u16;
2475 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2476 return u16;
2477# else
2478 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2479 if (rcStrict == VINF_SUCCESS)
2480 {
2481 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2482 pVCpu->iem.s.offOpcode += 2;
2483# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2484 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2485# else
2486 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2487# endif
2488 }
2489 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2490# endif
2491}
2492
2493
2494/**
2495 * Fetches the next opcode word, longjmp on error.
2496 *
2497 * @returns The opcode word.
2498 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2499 */
2500DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2501{
2502# ifdef IEM_WITH_CODE_TLB
2503 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2504 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2505 if (RT_LIKELY( pbBuf != NULL
2506 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2507 {
2508 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2509# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2510 return *(uint16_t const *)&pbBuf[offBuf];
2511# else
2512 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2513# endif
2514 }
2515# else
2516 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2517 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2518 {
2519 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2520# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2521 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2522# else
2523 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2524# endif
2525 }
2526# endif
2527 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2528}
2529
2530#endif /* IEM_WITH_SETJMP */
2531
2532
2533/**
2534 * Fetches the next opcode word, returns automatically on failure.
2535 *
2536 * @param a_pu16 Where to return the opcode word.
2537 * @remark Implicitly references pVCpu.
2538 */
2539#ifndef IEM_WITH_SETJMP
2540# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2541 do \
2542 { \
2543 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2544 if (rcStrict2 != VINF_SUCCESS) \
2545 return rcStrict2; \
2546 } while (0)
2547#else
2548# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2549#endif
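
/*
 * Illustrative note on the two fetch paths above: abOpcode holds the bytes in
 * guest instruction-stream order and x86 immediates are little endian, so for
 * the byte sequence 0x34 0x12 both paths yield 0x1234 - RT_MAKE_U16 assembles
 * it explicitly from low and high byte, while the IEM_USE_UNALIGNED_DATA_ACCESS
 * variant relies on the host doing a plain little-endian load of the same two
 * bytes:
 *
 *      RT_MAKE_U16(0x34, 0x12) == 0x1234
 */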
2550
2551#ifndef IEM_WITH_SETJMP
2552
2553/**
2554 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2555 *
2556 * @returns Strict VBox status code.
2557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2558 * @param pu32 Where to return the opcode double word.
2559 */
2560DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2561{
2562 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2563 if (rcStrict == VINF_SUCCESS)
2564 {
2565 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2566 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2567 pVCpu->iem.s.offOpcode = offOpcode + 2;
2568 }
2569 else
2570 *pu32 = 0;
2571 return rcStrict;
2572}
2573
2574
2575/**
2576 * Fetches the next opcode word, zero extending it to a double word.
2577 *
2578 * @returns Strict VBox status code.
2579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2580 * @param pu32 Where to return the opcode double word.
2581 */
2582DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2583{
2584 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2585 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2586 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2587
2588 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2589 pVCpu->iem.s.offOpcode = offOpcode + 2;
2590 return VINF_SUCCESS;
2591}
2592
2593#endif /* !IEM_WITH_SETJMP */
2594
2595
2596/**
2597 * Fetches the next opcode word and zero extends it to a double word, returns
2598 * automatically on failure.
2599 *
2600 * @param a_pu32 Where to return the opcode double word.
2601 * @remark Implicitly references pVCpu.
2602 */
2603#ifndef IEM_WITH_SETJMP
2604# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2605 do \
2606 { \
2607 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2608 if (rcStrict2 != VINF_SUCCESS) \
2609 return rcStrict2; \
2610 } while (0)
2611#else
2612# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2613#endif
2614
2615#ifndef IEM_WITH_SETJMP
2616
2617/**
2618 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2619 *
2620 * @returns Strict VBox status code.
2621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2622 * @param pu64 Where to return the opcode quad word.
2623 */
2624DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2625{
2626 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2627 if (rcStrict == VINF_SUCCESS)
2628 {
2629 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2630 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2631 pVCpu->iem.s.offOpcode = offOpcode + 2;
2632 }
2633 else
2634 *pu64 = 0;
2635 return rcStrict;
2636}
2637
2638
2639/**
2640 * Fetches the next opcode word, zero extending it to a quad word.
2641 *
2642 * @returns Strict VBox status code.
2643 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2644 * @param pu64 Where to return the opcode quad word.
2645 */
2646DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2647{
2648 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2649 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2650 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2651
2652 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2653 pVCpu->iem.s.offOpcode = offOpcode + 2;
2654 return VINF_SUCCESS;
2655}
2656
2657#endif /* !IEM_WITH_SETJMP */
2658
2659/**
2660 * Fetches the next opcode word and zero extends it to a quad word, returns
2661 * automatically on failure.
2662 *
2663 * @param a_pu64 Where to return the opcode quad word.
2664 * @remark Implicitly references pVCpu.
2665 */
2666#ifndef IEM_WITH_SETJMP
2667# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2668 do \
2669 { \
2670 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2671 if (rcStrict2 != VINF_SUCCESS) \
2672 return rcStrict2; \
2673 } while (0)
2674#else
2675# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2676#endif
2677
2678
2679#ifndef IEM_WITH_SETJMP
2680/**
2681 * Fetches the next signed word from the opcode stream.
2682 *
2683 * @returns Strict VBox status code.
2684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2685 * @param pi16 Where to return the signed word.
2686 */
2687DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2688{
2689 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2690}
2691#endif /* !IEM_WITH_SETJMP */
2692
2693
2694/**
2695 * Fetches the next signed word from the opcode stream, returning automatically
2696 * on failure.
2697 *
2698 * @param a_pi16 Where to return the signed word.
2699 * @remark Implicitly references pVCpu.
2700 */
2701#ifndef IEM_WITH_SETJMP
2702# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2703 do \
2704 { \
2705 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2706 if (rcStrict2 != VINF_SUCCESS) \
2707 return rcStrict2; \
2708 } while (0)
2709#else
2710# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2711#endif
2712
2713#ifndef IEM_WITH_SETJMP
2714
2715/**
2716 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2717 *
2718 * @returns Strict VBox status code.
2719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2720 * @param pu32 Where to return the opcode dword.
2721 */
2722DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2723{
2724 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2725 if (rcStrict == VINF_SUCCESS)
2726 {
2727 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2728# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2729 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2730# else
2731 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2732 pVCpu->iem.s.abOpcode[offOpcode + 1],
2733 pVCpu->iem.s.abOpcode[offOpcode + 2],
2734 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2735# endif
2736 pVCpu->iem.s.offOpcode = offOpcode + 4;
2737 }
2738 else
2739 *pu32 = 0;
2740 return rcStrict;
2741}
2742
2743
2744/**
2745 * Fetches the next opcode dword.
2746 *
2747 * @returns Strict VBox status code.
2748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2749 * @param pu32 Where to return the opcode double word.
2750 */
2751DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2752{
2753 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2754 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2755 {
2756 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2757# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2758 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2759# else
2760 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2761 pVCpu->iem.s.abOpcode[offOpcode + 1],
2762 pVCpu->iem.s.abOpcode[offOpcode + 2],
2763 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2764# endif
2765 return VINF_SUCCESS;
2766 }
2767 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2768}
2769
2770#else /* IEM_WITH_SETJMP */
2771
2772/**
2773 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2774 *
2775 * @returns The opcode dword.
2776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2777 */
2778DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2779{
2780# ifdef IEM_WITH_CODE_TLB
2781 uint32_t u32;
2782 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2783 return u32;
2784# else
2785 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2786 if (rcStrict == VINF_SUCCESS)
2787 {
2788 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2789 pVCpu->iem.s.offOpcode = offOpcode + 4;
2790# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2791 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2792# else
2793 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2794 pVCpu->iem.s.abOpcode[offOpcode + 1],
2795 pVCpu->iem.s.abOpcode[offOpcode + 2],
2796 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2797# endif
2798 }
2799 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2800# endif
2801}
2802
2803
2804/**
2805 * Fetches the next opcode dword, longjmp on error.
2806 *
2807 * @returns The opcode dword.
2808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2809 */
2810DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2811{
2812# ifdef IEM_WITH_CODE_TLB
2813 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2814 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2815 if (RT_LIKELY( pbBuf != NULL
2816 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2817 {
2818 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2819# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2820 return *(uint32_t const *)&pbBuf[offBuf];
2821# else
2822 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2823 pbBuf[offBuf + 1],
2824 pbBuf[offBuf + 2],
2825 pbBuf[offBuf + 3]);
2826# endif
2827 }
2828# else
2829 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2830 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2831 {
2832 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2833# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2834 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2835# else
2836 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2837 pVCpu->iem.s.abOpcode[offOpcode + 1],
2838 pVCpu->iem.s.abOpcode[offOpcode + 2],
2839 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2840# endif
2841 }
2842# endif
2843 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2844}
2845
2846#endif /* IEM_WITH_SETJMP */
2847
2848
2849/**
2850 * Fetches the next opcode dword, returns automatically on failure.
2851 *
2852 * @param a_pu32 Where to return the opcode dword.
2853 * @remark Implicitly references pVCpu.
2854 */
2855#ifndef IEM_WITH_SETJMP
2856# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2857 do \
2858 { \
2859 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2860 if (rcStrict2 != VINF_SUCCESS) \
2861 return rcStrict2; \
2862 } while (0)
2863#else
2864# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2865#endif
2866
2867#ifndef IEM_WITH_SETJMP
2868
2869/**
2870 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2871 *
2872 * @returns Strict VBox status code.
2873 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2874 * @param pu64 Where to return the opcode dword.
2875 */
2876DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2877{
2878 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2879 if (rcStrict == VINF_SUCCESS)
2880 {
2881 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2882 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2883 pVCpu->iem.s.abOpcode[offOpcode + 1],
2884 pVCpu->iem.s.abOpcode[offOpcode + 2],
2885 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2886 pVCpu->iem.s.offOpcode = offOpcode + 4;
2887 }
2888 else
2889 *pu64 = 0;
2890 return rcStrict;
2891}
2892
2893
2894/**
2895 * Fetches the next opcode dword, zero extending it to a quad word.
2896 *
2897 * @returns Strict VBox status code.
2898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2899 * @param pu64 Where to return the opcode quad word.
2900 */
2901DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2902{
2903 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2904 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2905 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2906
2907 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2908 pVCpu->iem.s.abOpcode[offOpcode + 1],
2909 pVCpu->iem.s.abOpcode[offOpcode + 2],
2910 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2911 pVCpu->iem.s.offOpcode = offOpcode + 4;
2912 return VINF_SUCCESS;
2913}
2914
2915#endif /* !IEM_WITH_SETJMP */
2916
2917
2918/**
2919 * Fetches the next opcode dword and zero extends it to a quad word, returns
2920 * automatically on failure.
2921 *
2922 * @param a_pu64 Where to return the opcode quad word.
2923 * @remark Implicitly references pVCpu.
2924 */
2925#ifndef IEM_WITH_SETJMP
2926# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2927 do \
2928 { \
2929 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2930 if (rcStrict2 != VINF_SUCCESS) \
2931 return rcStrict2; \
2932 } while (0)
2933#else
2934# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2935#endif
2936
2937
2938#ifndef IEM_WITH_SETJMP
2939/**
2940 * Fetches the next signed double word from the opcode stream.
2941 *
2942 * @returns Strict VBox status code.
2943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2944 * @param pi32 Where to return the signed double word.
2945 */
2946DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2947{
2948 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2949}
2950#endif
2951
2952/**
2953 * Fetches the next signed double word from the opcode stream, returning
2954 * automatically on failure.
2955 *
2956 * @param a_pi32 Where to return the signed double word.
2957 * @remark Implicitly references pVCpu.
2958 */
2959#ifndef IEM_WITH_SETJMP
2960# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2961 do \
2962 { \
2963 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2964 if (rcStrict2 != VINF_SUCCESS) \
2965 return rcStrict2; \
2966 } while (0)
2967#else
2968# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2969#endif
2970
2971#ifndef IEM_WITH_SETJMP
2972
2973/**
2974 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2975 *
2976 * @returns Strict VBox status code.
2977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2978 * @param pu64 Where to return the opcode qword.
2979 */
2980DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2981{
2982 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2983 if (rcStrict == VINF_SUCCESS)
2984 {
2985 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2986 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2987 pVCpu->iem.s.abOpcode[offOpcode + 1],
2988 pVCpu->iem.s.abOpcode[offOpcode + 2],
2989 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2990 pVCpu->iem.s.offOpcode = offOpcode + 4;
2991 }
2992 else
2993 *pu64 = 0;
2994 return rcStrict;
2995}
2996
2997
2998/**
2999 * Fetches the next opcode dword, sign extending it into a quad word.
3000 *
3001 * @returns Strict VBox status code.
3002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3003 * @param pu64 Where to return the opcode quad word.
3004 */
3005DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3006{
3007 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3008 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3009 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3010
3011 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3012 pVCpu->iem.s.abOpcode[offOpcode + 1],
3013 pVCpu->iem.s.abOpcode[offOpcode + 2],
3014 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3015 *pu64 = i32;
3016 pVCpu->iem.s.offOpcode = offOpcode + 4;
3017 return VINF_SUCCESS;
3018}
3019
3020#endif /* !IEM_WITH_SETJMP */
3021
3022
3023/**
3024 * Fetches the next opcode double word and sign extends it to a quad word,
3025 * returns automatically on failure.
3026 *
3027 * @param a_pu64 Where to return the opcode quad word.
3028 * @remark Implicitly references pVCpu.
3029 */
3030#ifndef IEM_WITH_SETJMP
3031# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3032 do \
3033 { \
3034 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3035 if (rcStrict2 != VINF_SUCCESS) \
3036 return rcStrict2; \
3037 } while (0)
3038#else
3039# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3040#endif
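
/*
 * Worked example for the S32_SX_U64 fetcher above (illustrative): a dword of
 * 0xfffffff0 is -16 as int32_t and widens to 0xfffffffffffffff0, matching how
 * x86-64 sign-extends 32-bit displacements and immediates to 64 bits:
 *
 *      (uint64_t)(int32_t)UINT32_C(0xfffffff0) == UINT64_C(0xfffffffffffffff0)
 */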
3041
3042#ifndef IEM_WITH_SETJMP
3043
3044/**
3045 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3046 *
3047 * @returns Strict VBox status code.
3048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3049 * @param pu64 Where to return the opcode qword.
3050 */
3051DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3052{
3053 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3054 if (rcStrict == VINF_SUCCESS)
3055 {
3056 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3057# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3058 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3059# else
3060 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3061 pVCpu->iem.s.abOpcode[offOpcode + 1],
3062 pVCpu->iem.s.abOpcode[offOpcode + 2],
3063 pVCpu->iem.s.abOpcode[offOpcode + 3],
3064 pVCpu->iem.s.abOpcode[offOpcode + 4],
3065 pVCpu->iem.s.abOpcode[offOpcode + 5],
3066 pVCpu->iem.s.abOpcode[offOpcode + 6],
3067 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3068# endif
3069 pVCpu->iem.s.offOpcode = offOpcode + 8;
3070 }
3071 else
3072 *pu64 = 0;
3073 return rcStrict;
3074}
3075
3076
3077/**
3078 * Fetches the next opcode qword.
3079 *
3080 * @returns Strict VBox status code.
3081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3082 * @param pu64 Where to return the opcode qword.
3083 */
3084DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3085{
3086 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3087 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3088 {
3089# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3090 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3091# else
3092 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3093 pVCpu->iem.s.abOpcode[offOpcode + 1],
3094 pVCpu->iem.s.abOpcode[offOpcode + 2],
3095 pVCpu->iem.s.abOpcode[offOpcode + 3],
3096 pVCpu->iem.s.abOpcode[offOpcode + 4],
3097 pVCpu->iem.s.abOpcode[offOpcode + 5],
3098 pVCpu->iem.s.abOpcode[offOpcode + 6],
3099 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3100# endif
3101 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3102 return VINF_SUCCESS;
3103 }
3104 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3105}
3106
3107#else /* IEM_WITH_SETJMP */
3108
3109/**
3110 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3111 *
3112 * @returns The opcode qword.
3113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3114 */
3115DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3116{
3117# ifdef IEM_WITH_CODE_TLB
3118 uint64_t u64;
3119 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3120 return u64;
3121# else
3122 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3123 if (rcStrict == VINF_SUCCESS)
3124 {
3125 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3126 pVCpu->iem.s.offOpcode = offOpcode + 8;
3127# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3128 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3129# else
3130 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3131 pVCpu->iem.s.abOpcode[offOpcode + 1],
3132 pVCpu->iem.s.abOpcode[offOpcode + 2],
3133 pVCpu->iem.s.abOpcode[offOpcode + 3],
3134 pVCpu->iem.s.abOpcode[offOpcode + 4],
3135 pVCpu->iem.s.abOpcode[offOpcode + 5],
3136 pVCpu->iem.s.abOpcode[offOpcode + 6],
3137 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3138# endif
3139 }
3140 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3141# endif
3142}
3143
3144
3145/**
3146 * Fetches the next opcode qword, longjmp on error.
3147 *
3148 * @returns The opcode qword.
3149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3150 */
3151DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3152{
3153# ifdef IEM_WITH_CODE_TLB
3154 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3155 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3156 if (RT_LIKELY( pbBuf != NULL
3157 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3158 {
3159 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3160# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3161 return *(uint64_t const *)&pbBuf[offBuf];
3162# else
3163 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3164 pbBuf[offBuf + 1],
3165 pbBuf[offBuf + 2],
3166 pbBuf[offBuf + 3],
3167 pbBuf[offBuf + 4],
3168 pbBuf[offBuf + 5],
3169 pbBuf[offBuf + 6],
3170 pbBuf[offBuf + 7]);
3171# endif
3172 }
3173# else
3174 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3175 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3176 {
3177 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3178# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3179 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3180# else
3181 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3182 pVCpu->iem.s.abOpcode[offOpcode + 1],
3183 pVCpu->iem.s.abOpcode[offOpcode + 2],
3184 pVCpu->iem.s.abOpcode[offOpcode + 3],
3185 pVCpu->iem.s.abOpcode[offOpcode + 4],
3186 pVCpu->iem.s.abOpcode[offOpcode + 5],
3187 pVCpu->iem.s.abOpcode[offOpcode + 6],
3188 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3189# endif
3190 }
3191# endif
3192 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3193}
3194
3195#endif /* IEM_WITH_SETJMP */
3196
3197/**
3198 * Fetches the next opcode quad word, returns automatically on failure.
3199 *
3200 * @param a_pu64 Where to return the opcode quad word.
3201 * @remark Implicitly references pVCpu.
3202 */
3203#ifndef IEM_WITH_SETJMP
3204# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3205 do \
3206 { \
3207 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3208 if (rcStrict2 != VINF_SUCCESS) \
3209 return rcStrict2; \
3210 } while (0)
3211#else
3212# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3213#endif
3214
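/*
 * Illustrative sketch only (not taken from the decoder tables): inside an
 * instruction decoder where pVCpu is in scope, a 64-bit immediate would be
 * pulled in roughly like this.  In the status code build the macro returns
 * from the enclosing function on a fetch error; in the IEM_WITH_SETJMP build
 * it longjmps instead.
 *
 * @code
 *      uint64_t u64Imm;
 *      IEM_OPCODE_GET_NEXT_U64(&u64Imm);   // bails out / longjmps on fetch failure
 *      // ... use u64Imm as the instruction's 64-bit immediate operand ...
 * @endcode
 */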
3215
3216/** @name Misc Worker Functions.
3217 * @{
3218 */
3219
3220/**
3221 * Gets the exception class for the specified exception vector.
3222 *
3223 * @returns The class of the specified exception.
3224 * @param uVector The exception vector.
3225 */
3226IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3227{
3228 Assert(uVector <= X86_XCPT_LAST);
3229 switch (uVector)
3230 {
3231 case X86_XCPT_DE:
3232 case X86_XCPT_TS:
3233 case X86_XCPT_NP:
3234 case X86_XCPT_SS:
3235 case X86_XCPT_GP:
3236 case X86_XCPT_SX: /* AMD only */
3237 return IEMXCPTCLASS_CONTRIBUTORY;
3238
3239 case X86_XCPT_PF:
3240 case X86_XCPT_VE: /* Intel only */
3241 return IEMXCPTCLASS_PAGE_FAULT;
3242
3243 case X86_XCPT_DF:
3244 return IEMXCPTCLASS_DOUBLE_FAULT;
3245 }
3246 return IEMXCPTCLASS_BENIGN;
3247}
3248
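/*
 * Condensed reference for how the classes combine (see IEMEvaluateRecursiveXcpt
 * below and the Intel/AMD double fault tables); "current" means the second
 * exception is simply delivered as-is:
 *
 *      first \ second  | benign  | contributory | page fault
 *      ----------------+---------+--------------+-------------
 *      benign          | current | current      | current
 *      contributory    | current | #DF          | current
 *      page fault      | current | #DF          | #DF
 *      double fault    | current | triple fault | triple fault
 */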
3249
3250/**
3251 * Evaluates how to handle an exception caused during delivery of another event
3252 * (exception / interrupt).
3253 *
3254 * @returns How to handle the recursive exception.
3255 * @param pVCpu The cross context virtual CPU structure of the
3256 * calling thread.
3257 * @param fPrevFlags The flags of the previous event.
3258 * @param uPrevVector The vector of the previous event.
3259 * @param fCurFlags The flags of the current exception.
3260 * @param uCurVector The vector of the current exception.
3261 * @param pfXcptRaiseInfo Where to store additional information about the
3262 * exception condition. Optional.
3263 */
3264VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3265 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3266{
3267 /*
3268 * Only CPU exceptions can be raised while delivering other events; software-interrupt
3269 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3270 */
3271 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3272 Assert(pVCpu); RT_NOREF(pVCpu);
3273 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3274
3275 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3276 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3277 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3278 {
3279 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3280 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3281 {
3282 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3283 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3284 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3285 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3286 {
3287 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3288 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3289 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3290 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3291 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3292 }
3293 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3294 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3295 {
3296 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3297 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%u uCurVector=%u -> #DF\n", uPrevVector, uCurVector));
3298 }
3299 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3300 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3301 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3302 {
3303 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3304 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3305 }
3306 }
3307 else
3308 {
3309 if (uPrevVector == X86_XCPT_NMI)
3310 {
3311 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3312 if (uCurVector == X86_XCPT_PF)
3313 {
3314 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3315 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3316 }
3317 }
3318 else if ( uPrevVector == X86_XCPT_AC
3319 && uCurVector == X86_XCPT_AC)
3320 {
3321 enmRaise = IEMXCPTRAISE_CPU_HANG;
3322 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3323 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3324 }
3325 }
3326 }
3327 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3328 {
3329 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3330 if (uCurVector == X86_XCPT_PF)
3331 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3332 }
3333 else
3334 {
3335 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3336 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3337 }
3338
3339 if (pfXcptRaiseInfo)
3340 *pfXcptRaiseInfo = fRaiseInfo;
3341 return enmRaise;
3342}
3343
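/*
 * Illustrative caller sketch (simplified, variable names made up): event
 * injection code that hits a second exception while delivering a first one
 * can ask IEM how to proceed.
 *
 * @code
 *      IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
 *      IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, IEM_XCPT_FLAGS_T_CPU_XCPT, uFirstVector,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, uSecondVector, &fRaiseInfo);
 *      if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
 *      {
 *          // queue #DF (vector 8, error code 0) instead of the second exception
 *      }
 * @endcode
 */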
3344
3345/**
3346 * Enters the CPU shutdown state initiated by a triple fault or other
3347 * unrecoverable conditions.
3348 *
3349 * @returns Strict VBox status code.
3350 * @param pVCpu The cross context virtual CPU structure of the
3351 * calling thread.
3352 */
3353IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3354{
3355 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3356 {
3357 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3358 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3359 }
3360
3361 RT_NOREF(pVCpu);
3362 return VINF_EM_TRIPLE_FAULT;
3363}
3364
3365
3366/**
3367 * Validates a new SS segment.
3368 *
3369 * @returns VBox strict status code.
3370 * @param pVCpu The cross context virtual CPU structure of the
3371 * calling thread.
3372 * @param pCtx The CPU context.
3373 * @param NewSS The new SS selector.
3374 * @param uCpl The CPL to load the stack for.
3375 * @param pDesc Where to return the descriptor.
3376 */
3377IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3378{
3379 NOREF(pCtx);
3380
3381 /* Null selectors are not allowed (we're not called for dispatching
3382 interrupts with SS=0 in long mode). */
3383 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3384 {
3385 Log(("iemMiscValidateNewSS: %#x - null selector -> #TS(0)\n", NewSS));
3386 return iemRaiseTaskSwitchFault0(pVCpu);
3387 }
3388
3389 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3390 if ((NewSS & X86_SEL_RPL) != uCpl)
3391 {
3392 Log(("iemMiscValidateNewSS: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
3393 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3394 }
3395
3396 /*
3397 * Read the descriptor.
3398 */
3399 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3400 if (rcStrict != VINF_SUCCESS)
3401 return rcStrict;
3402
3403 /*
3404 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3405 */
3406 if (!pDesc->Legacy.Gen.u1DescType)
3407 {
3408 Log(("iemMiscValidateNewSS: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3409 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3410 }
3411
3412 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3413 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3414 {
3415 Log(("iemMiscValidateNewSS: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3416 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3417 }
3418 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3419 {
3420 Log(("iemMiscValidateNewSS: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3421 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3422 }
3423
3424 /* Is it there? */
3425 /** @todo testcase: Is this checked before the canonical / limit check below? */
3426 if (!pDesc->Legacy.Gen.u1Present)
3427 {
3428 Log(("iemMiscValidateNewSS: %#x - segment not present -> #NP\n", NewSS));
3429 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3430 }
3431
3432 return VINF_SUCCESS;
3433}
3434
3435
3436/**
3437 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3438 * not.
3439 *
3440 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3441 * @param a_pCtx The CPU context.
3442 */
3443#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3444# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3445 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3446 ? (a_pCtx)->eflags.u \
3447 : CPUMRawGetEFlags(a_pVCpu) )
3448#else
3449# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3450 ( (a_pCtx)->eflags.u )
3451#endif
3452
3453/**
3454 * Updates the EFLAGS in the correct manner wrt. PATM.
3455 *
3456 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3457 * @param a_pCtx The CPU context.
3458 * @param a_fEfl The new EFLAGS.
3459 */
3460#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3461# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3462 do { \
3463 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3464 (a_pCtx)->eflags.u = (a_fEfl); \
3465 else \
3466 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3467 } while (0)
3468#else
3469# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3470 do { \
3471 (a_pCtx)->eflags.u = (a_fEfl); \
3472 } while (0)
3473#endif
3474
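/*
 * Minimal usage sketch for the two accessors above: read-modify-write of the
 * guest EFLAGS should go through the macros so raw-mode/PATM gets a say when
 * it owns parts of the register.
 *
 * @code
 *      uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;                    // simplified CLI-like update
 *      IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 * @endcode
 */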
3475
3476/** @} */
3477
3478/** @name Raising Exceptions.
3479 *
3480 * @{
3481 */
3482
3483
3484/**
3485 * Loads the specified stack far pointer from the TSS.
3486 *
3487 * @returns VBox strict status code.
3488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3489 * @param pCtx The CPU context.
3490 * @param uCpl The CPL to load the stack for.
3491 * @param pSelSS Where to return the new stack segment.
3492 * @param puEsp Where to return the new stack pointer.
3493 */
3494IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3495 PRTSEL pSelSS, uint32_t *puEsp)
3496{
3497 VBOXSTRICTRC rcStrict;
3498 Assert(uCpl < 4);
3499
3500 switch (pCtx->tr.Attr.n.u4Type)
3501 {
3502 /*
3503 * 16-bit TSS (X86TSS16).
3504 */
3505 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3506 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3507 {
3508 uint32_t off = uCpl * 4 + 2;
3509 if (off + 4 <= pCtx->tr.u32Limit)
3510 {
3511 /** @todo check actual access pattern here. */
3512 uint32_t u32Tmp = 0; /* silence gcc's maybe-uninitialized warning */
3513 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3514 if (rcStrict == VINF_SUCCESS)
3515 {
3516 *puEsp = RT_LOWORD(u32Tmp);
3517 *pSelSS = RT_HIWORD(u32Tmp);
3518 return VINF_SUCCESS;
3519 }
3520 }
3521 else
3522 {
3523 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3524 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3525 }
3526 break;
3527 }
3528
3529 /*
3530 * 32-bit TSS (X86TSS32).
3531 */
3532 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3533 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3534 {
3535 uint32_t off = uCpl * 8 + 4;
3536 if (off + 7 <= pCtx->tr.u32Limit)
3537 {
3538/** @todo check actual access pattern here. */
3539 uint64_t u64Tmp;
3540 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3541 if (rcStrict == VINF_SUCCESS)
3542 {
3543 *puEsp = u64Tmp & UINT32_MAX;
3544 *pSelSS = (RTSEL)(u64Tmp >> 32);
3545 return VINF_SUCCESS;
3546 }
3547 }
3548 else
3549 {
3550 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3551 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3552 }
3553 break;
3554 }
3555
3556 default:
3557 AssertFailed();
3558 rcStrict = VERR_IEM_IPE_4;
3559 break;
3560 }
3561
3562 *puEsp = 0; /* make gcc happy */
3563 *pSelSS = 0; /* make gcc happy */
3564 return rcStrict;
3565}
3566
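/*
 * For reference, the ring stack slots read above: the 16-bit TSS keeps SPn:SSn
 * word pairs at offset 2 + n * 4, while the 32-bit TSS keeps ESPn:SSn pairs at
 * offset 4 + n * 8; hence the uCpl * 4 + 2 and uCpl * 8 + 4 calculations.
 */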
3567
3568/**
3569 * Loads the specified stack pointer from the 64-bit TSS.
3570 *
3571 * @returns VBox strict status code.
3572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3573 * @param pCtx The CPU context.
3574 * @param uCpl The CPL to load the stack for.
3575 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3576 * @param puRsp Where to return the new stack pointer.
3577 */
3578IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3579{
3580 Assert(uCpl < 4);
3581 Assert(uIst < 8);
3582 *puRsp = 0; /* make gcc happy */
3583
3584 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3585
3586 uint32_t off;
3587 if (uIst)
3588 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3589 else
3590 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3591 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3592 {
3593 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3594 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3595 }
3596
3597 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3598}
3599
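/*
 * Likewise for the 64-bit TSS above: RSP0..RSP2 live at offset 4 + uCpl * 8 and
 * IST1..IST7 at offset 36 + (uIst - 1) * 8, which is what the RT_OFFSETOF()
 * based calculation works out to.
 */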
3600
3601/**
3602 * Adjust the CPU state according to the exception being raised.
3603 *
3604 * @param pCtx The CPU context.
3605 * @param u8Vector The exception that has been raised.
3606 */
3607DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3608{
3609 switch (u8Vector)
3610 {
3611 case X86_XCPT_DB:
3612 pCtx->dr[7] &= ~X86_DR7_GD;
3613 break;
3614 /** @todo Read the AMD and Intel exception reference... */
3615 }
3616}
3617
3618
3619/**
3620 * Implements exceptions and interrupts for real mode.
3621 *
3622 * @returns VBox strict status code.
3623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3624 * @param pCtx The CPU context.
3625 * @param cbInstr The number of bytes to offset rIP by in the return
3626 * address.
3627 * @param u8Vector The interrupt / exception vector number.
3628 * @param fFlags The flags.
3629 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3630 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3631 */
3632IEM_STATIC VBOXSTRICTRC
3633iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3634 PCPUMCTX pCtx,
3635 uint8_t cbInstr,
3636 uint8_t u8Vector,
3637 uint32_t fFlags,
3638 uint16_t uErr,
3639 uint64_t uCr2)
3640{
3641 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3642 NOREF(uErr); NOREF(uCr2);
3643
3644 /*
3645 * Read the IDT entry.
3646 */
3647 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3648 {
3649 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3650 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3651 }
3652 RTFAR16 Idte;
3653 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3654 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3655 return rcStrict;
3656
3657 /*
3658 * Push the stack frame.
3659 */
3660 uint16_t *pu16Frame;
3661 uint64_t uNewRsp;
3662 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3663 if (rcStrict != VINF_SUCCESS)
3664 return rcStrict;
3665
3666 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3667#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3668 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3669 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3670 fEfl |= UINT16_C(0xf000);
3671#endif
3672 pu16Frame[2] = (uint16_t)fEfl;
3673 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3674 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3675 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3676 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3677 return rcStrict;
3678
3679 /*
3680 * Load the vector address into cs:ip and make exception specific state
3681 * adjustments.
3682 */
3683 pCtx->cs.Sel = Idte.sel;
3684 pCtx->cs.ValidSel = Idte.sel;
3685 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3686 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3687 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3688 pCtx->rip = Idte.off;
3689 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3690 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3691
3692 /** @todo do we actually do this in real mode? */
3693 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3694 iemRaiseXcptAdjustState(pCtx, u8Vector);
3695
3696 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3697}
3698
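/*
 * Rough picture of what the function above does, e.g. for a software INT in
 * real mode (sketch, not the actual code path): read the 4-byte IVT entry at
 * IDTR.base + vector * 4 (offset:segment), push a 3-word frame and branch.
 *
 * @code
 *      // stack after the push, lowest address first:
 *      //   [SP+0]  return IP    (IP of the next instruction for software INT)
 *      //   [SP+2]  return CS
 *      //   [SP+4]  FLAGS        (IF, TF and AC are then cleared in the live EFLAGS)
 * @endcode
 */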
3699
3700/**
3701 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3702 *
3703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3704 * @param pSReg Pointer to the segment register.
3705 */
3706IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3707{
3708 pSReg->Sel = 0;
3709 pSReg->ValidSel = 0;
3710 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3711 {
3712 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
3713 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3714 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3715 }
3716 else
3717 {
3718 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3719 /** @todo check this on AMD-V */
3720 pSReg->u64Base = 0;
3721 pSReg->u32Limit = 0;
3722 }
3723}
3724
3725
3726/**
3727 * Loads a segment selector during a task switch in V8086 mode.
3728 *
3729 * @param pSReg Pointer to the segment register.
3730 * @param uSel The selector value to load.
3731 */
3732IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3733{
3734 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3735 pSReg->Sel = uSel;
3736 pSReg->ValidSel = uSel;
3737 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3738 pSReg->u64Base = uSel << 4;
3739 pSReg->u32Limit = 0xffff;
3740 pSReg->Attr.u = 0xf3;
3741}
3742
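/*
 * Note on the 0xf3 attribute value used above: it decodes to P=1, DPL=3, S=1
 * (code/data) and type 3 (read/write data, accessed), i.e. the fixed segment
 * attributes of virtual-8086 mode.
 */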
3743
3744/**
3745 * Loads a NULL data selector into a selector register, both the hidden and
3746 * visible parts, in protected mode.
3747 *
3748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3749 * @param pSReg Pointer to the segment register.
3750 * @param uRpl The RPL.
3751 */
3752IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3753{
3754 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3755 * data selector in protected mode. */
3756 pSReg->Sel = uRpl;
3757 pSReg->ValidSel = uRpl;
3758 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3759 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3760 {
3761 /* VT-x (Intel 3960x) observed doing something like this. */
3762 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3763 pSReg->u32Limit = UINT32_MAX;
3764 pSReg->u64Base = 0;
3765 }
3766 else
3767 {
3768 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3769 pSReg->u32Limit = 0;
3770 pSReg->u64Base = 0;
3771 }
3772}
3773
3774
3775/**
3776 * Loads a segment selector during a task switch in protected mode.
3777 *
3778 * In this task switch scenario, we would throw \#TS exceptions rather than
3779 * \#GPs.
3780 *
3781 * @returns VBox strict status code.
3782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3783 * @param pSReg Pointer to the segment register.
3784 * @param uSel The new selector value.
3785 *
3786 * @remarks This does _not_ handle CS or SS.
3787 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3788 */
3789IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3790{
3791 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3792
3793 /* Null data selector. */
3794 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3795 {
3796 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3797 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3798 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3799 return VINF_SUCCESS;
3800 }
3801
3802 /* Fetch the descriptor. */
3803 IEMSELDESC Desc;
3804 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3805 if (rcStrict != VINF_SUCCESS)
3806 {
3807 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3808 VBOXSTRICTRC_VAL(rcStrict)));
3809 return rcStrict;
3810 }
3811
3812 /* Must be a data segment or readable code segment. */
3813 if ( !Desc.Legacy.Gen.u1DescType
3814 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3815 {
3816 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3817 Desc.Legacy.Gen.u4Type));
3818 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3819 }
3820
3821 /* Check privileges for data segments and non-conforming code segments. */
3822 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3823 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3824 {
3825 /* The RPL and the new CPL must be less than or equal to the DPL. */
3826 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3827 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3828 {
3829 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3830 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3831 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3832 }
3833 }
3834
3835 /* Is it there? */
3836 if (!Desc.Legacy.Gen.u1Present)
3837 {
3838 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3839 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3840 }
3841
3842 /* The base and limit. */
3843 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3844 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3845
3846 /*
3847 * Ok, everything checked out fine. Now set the accessed bit before
3848 * committing the result into the registers.
3849 */
3850 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3851 {
3852 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3853 if (rcStrict != VINF_SUCCESS)
3854 return rcStrict;
3855 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3856 }
3857
3858 /* Commit */
3859 pSReg->Sel = uSel;
3860 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3861 pSReg->u32Limit = cbLimit;
3862 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3863 pSReg->ValidSel = uSel;
3864 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3865 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3866 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3867
3868 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3869 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3870 return VINF_SUCCESS;
3871}
3872
3873
3874/**
3875 * Performs a task switch.
3876 *
3877 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3878 * caller is responsible for performing the necessary checks (like DPL, TSS
3879 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3880 * reference for JMP, CALL, IRET.
3881 *
3882 * If the task switch is due to a software interrupt or hardware exception,
3883 * the caller is responsible for validating the TSS selector and descriptor. See
3884 * Intel Instruction reference for INT n.
3885 *
3886 * @returns VBox strict status code.
3887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3888 * @param pCtx The CPU context.
3889 * @param enmTaskSwitch What caused this task switch.
3890 * @param uNextEip The EIP effective after the task switch.
3891 * @param fFlags The flags.
3892 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3893 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3894 * @param SelTSS The TSS selector of the new task.
3895 * @param pNewDescTSS Pointer to the new TSS descriptor.
3896 */
3897IEM_STATIC VBOXSTRICTRC
3898iemTaskSwitch(PVMCPU pVCpu,
3899 PCPUMCTX pCtx,
3900 IEMTASKSWITCH enmTaskSwitch,
3901 uint32_t uNextEip,
3902 uint32_t fFlags,
3903 uint16_t uErr,
3904 uint64_t uCr2,
3905 RTSEL SelTSS,
3906 PIEMSELDESC pNewDescTSS)
3907{
3908 Assert(!IEM_IS_REAL_MODE(pVCpu));
3909 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3910
3911 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3912 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3913 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3914 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3915 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3916
3917 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3918 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3919
3920 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3921 fIsNewTSS386, pCtx->eip, uNextEip));
3922
3923 /* Update CR2 in case it's a page-fault. */
3924 /** @todo This should probably be done much earlier in IEM/PGM. See
3925 * @bugref{5653#c49}. */
3926 if (fFlags & IEM_XCPT_FLAGS_CR2)
3927 pCtx->cr2 = uCr2;
3928
3929 /*
3930 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3931 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3932 */
3933 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3934 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3935 if (uNewTSSLimit < uNewTSSLimitMin)
3936 {
3937 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3938 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3939 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3940 }
3941
3942 /*
3943 * Check the current TSS limit. The last written byte to the current TSS during the
3944 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3945 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3946 *
3947 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3948 * end up with smaller than "legal" TSS limits.
3949 */
3950 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3951 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3952 if (uCurTSSLimit < uCurTSSLimitMin)
3953 {
3954 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3955 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3956 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3957 }
3958
3959 /*
3960 * Verify that the new TSS can be accessed and map it. Map only the required contents
3961 * and not the entire TSS.
3962 */
3963 void *pvNewTSS;
3964 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3965 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3966 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3967 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3968 * not perform correct translation if this happens. See Intel spec. 7.2.1
3969 * "Task-State Segment" */
3970 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3971 if (rcStrict != VINF_SUCCESS)
3972 {
3973 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3974 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3975 return rcStrict;
3976 }
3977
3978 /*
3979 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3980 */
3981 uint32_t u32EFlags = pCtx->eflags.u32;
3982 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3983 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3984 {
3985 PX86DESC pDescCurTSS;
3986 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3987 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3988 if (rcStrict != VINF_SUCCESS)
3989 {
3990 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3991 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3992 return rcStrict;
3993 }
3994
3995 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3996 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3997 if (rcStrict != VINF_SUCCESS)
3998 {
3999 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4000 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4001 return rcStrict;
4002 }
4003
4004 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4005 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4006 {
4007 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4008 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4009 u32EFlags &= ~X86_EFL_NT;
4010 }
4011 }
4012
4013 /*
4014 * Save the CPU state into the current TSS.
4015 */
4016 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4017 if (GCPtrNewTSS == GCPtrCurTSS)
4018 {
4019 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4020 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4021 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4022 }
4023 if (fIsNewTSS386)
4024 {
4025 /*
4026 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4027 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4028 */
4029 void *pvCurTSS32;
4030 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4031 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4032 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4033 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4034 if (rcStrict != VINF_SUCCESS)
4035 {
4036 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4037 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4038 return rcStrict;
4039 }
4040
4041 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4042 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4043 pCurTSS32->eip = uNextEip;
4044 pCurTSS32->eflags = u32EFlags;
4045 pCurTSS32->eax = pCtx->eax;
4046 pCurTSS32->ecx = pCtx->ecx;
4047 pCurTSS32->edx = pCtx->edx;
4048 pCurTSS32->ebx = pCtx->ebx;
4049 pCurTSS32->esp = pCtx->esp;
4050 pCurTSS32->ebp = pCtx->ebp;
4051 pCurTSS32->esi = pCtx->esi;
4052 pCurTSS32->edi = pCtx->edi;
4053 pCurTSS32->es = pCtx->es.Sel;
4054 pCurTSS32->cs = pCtx->cs.Sel;
4055 pCurTSS32->ss = pCtx->ss.Sel;
4056 pCurTSS32->ds = pCtx->ds.Sel;
4057 pCurTSS32->fs = pCtx->fs.Sel;
4058 pCurTSS32->gs = pCtx->gs.Sel;
4059
4060 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4061 if (rcStrict != VINF_SUCCESS)
4062 {
4063 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4064 VBOXSTRICTRC_VAL(rcStrict)));
4065 return rcStrict;
4066 }
4067 }
4068 else
4069 {
4070 /*
4071 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4072 */
4073 void *pvCurTSS16;
4074 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4075 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4076 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4077 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4078 if (rcStrict != VINF_SUCCESS)
4079 {
4080 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4081 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4082 return rcStrict;
4083 }
4084
4085 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4086 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4087 pCurTSS16->ip = uNextEip;
4088 pCurTSS16->flags = u32EFlags;
4089 pCurTSS16->ax = pCtx->ax;
4090 pCurTSS16->cx = pCtx->cx;
4091 pCurTSS16->dx = pCtx->dx;
4092 pCurTSS16->bx = pCtx->bx;
4093 pCurTSS16->sp = pCtx->sp;
4094 pCurTSS16->bp = pCtx->bp;
4095 pCurTSS16->si = pCtx->si;
4096 pCurTSS16->di = pCtx->di;
4097 pCurTSS16->es = pCtx->es.Sel;
4098 pCurTSS16->cs = pCtx->cs.Sel;
4099 pCurTSS16->ss = pCtx->ss.Sel;
4100 pCurTSS16->ds = pCtx->ds.Sel;
4101
4102 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4103 if (rcStrict != VINF_SUCCESS)
4104 {
4105 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4106 VBOXSTRICTRC_VAL(rcStrict)));
4107 return rcStrict;
4108 }
4109 }
4110
4111 /*
4112 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4113 */
4114 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4115 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4116 {
4117 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4118 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4119 pNewTSS->selPrev = pCtx->tr.Sel;
4120 }
4121
4122 /*
4123 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4124 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4125 */
4126 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4127 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4128 bool fNewDebugTrap;
4129 if (fIsNewTSS386)
4130 {
4131 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4132 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4133 uNewEip = pNewTSS32->eip;
4134 uNewEflags = pNewTSS32->eflags;
4135 uNewEax = pNewTSS32->eax;
4136 uNewEcx = pNewTSS32->ecx;
4137 uNewEdx = pNewTSS32->edx;
4138 uNewEbx = pNewTSS32->ebx;
4139 uNewEsp = pNewTSS32->esp;
4140 uNewEbp = pNewTSS32->ebp;
4141 uNewEsi = pNewTSS32->esi;
4142 uNewEdi = pNewTSS32->edi;
4143 uNewES = pNewTSS32->es;
4144 uNewCS = pNewTSS32->cs;
4145 uNewSS = pNewTSS32->ss;
4146 uNewDS = pNewTSS32->ds;
4147 uNewFS = pNewTSS32->fs;
4148 uNewGS = pNewTSS32->gs;
4149 uNewLdt = pNewTSS32->selLdt;
4150 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4151 }
4152 else
4153 {
4154 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4155 uNewCr3 = 0;
4156 uNewEip = pNewTSS16->ip;
4157 uNewEflags = pNewTSS16->flags;
4158 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4159 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4160 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4161 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4162 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4163 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4164 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4165 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4166 uNewES = pNewTSS16->es;
4167 uNewCS = pNewTSS16->cs;
4168 uNewSS = pNewTSS16->ss;
4169 uNewDS = pNewTSS16->ds;
4170 uNewFS = 0;
4171 uNewGS = 0;
4172 uNewLdt = pNewTSS16->selLdt;
4173 fNewDebugTrap = false;
4174 }
4175
4176 if (GCPtrNewTSS == GCPtrCurTSS)
4177 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4178 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4179
4180 /*
4181 * We're done accessing the new TSS.
4182 */
4183 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4184 if (rcStrict != VINF_SUCCESS)
4185 {
4186 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4187 return rcStrict;
4188 }
4189
4190 /*
4191 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4192 */
4193 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4194 {
4195 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4196 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4197 if (rcStrict != VINF_SUCCESS)
4198 {
4199 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4200 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4201 return rcStrict;
4202 }
4203
4204 /* Check that the descriptor indicates the new TSS is available (not busy). */
4205 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4206 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4207 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4208
4209 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4210 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4211 if (rcStrict != VINF_SUCCESS)
4212 {
4213 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4214 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4215 return rcStrict;
4216 }
4217 }
4218
4219 /*
4220 * From this point on, we're technically in the new task. Exceptions raised from here on are
4221 * deferred until the task switch completes, and are then delivered before the new task executes any instructions.
4222 */
4223 pCtx->tr.Sel = SelTSS;
4224 pCtx->tr.ValidSel = SelTSS;
4225 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4226 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4227 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4228 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4229 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4230
4231 /* Set the busy bit in TR. */
4232 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4233 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4234 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4235 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4236 {
4237 uNewEflags |= X86_EFL_NT;
4238 }
4239
4240 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4241 pCtx->cr0 |= X86_CR0_TS;
4242 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4243
4244 pCtx->eip = uNewEip;
4245 pCtx->eax = uNewEax;
4246 pCtx->ecx = uNewEcx;
4247 pCtx->edx = uNewEdx;
4248 pCtx->ebx = uNewEbx;
4249 pCtx->esp = uNewEsp;
4250 pCtx->ebp = uNewEbp;
4251 pCtx->esi = uNewEsi;
4252 pCtx->edi = uNewEdi;
4253
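 /* Keep only the architecturally defined live flags and force the reserved always-one bit (X86_EFL_RA1_MASK, bit 1). */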
4254 uNewEflags &= X86_EFL_LIVE_MASK;
4255 uNewEflags |= X86_EFL_RA1_MASK;
4256 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4257
4258 /*
4259 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4260 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4261 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4262 */
4263 pCtx->es.Sel = uNewES;
4264 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4265
4266 pCtx->cs.Sel = uNewCS;
4267 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4268
4269 pCtx->ss.Sel = uNewSS;
4270 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4271
4272 pCtx->ds.Sel = uNewDS;
4273 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4274
4275 pCtx->fs.Sel = uNewFS;
4276 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4277
4278 pCtx->gs.Sel = uNewGS;
4279 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4280 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4281
4282 pCtx->ldtr.Sel = uNewLdt;
4283 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4284 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4285 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4286
4287 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4288 {
4289 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4290 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4291 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4292 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4293 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4294 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4295 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4296 }
4297
4298 /*
4299 * Switch CR3 for the new task.
4300 */
4301 if ( fIsNewTSS386
4302 && (pCtx->cr0 & X86_CR0_PG))
4303 {
4304 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4305 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4306 {
4307 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4308 AssertRCSuccessReturn(rc, rc);
4309 }
4310 else
4311 pCtx->cr3 = uNewCr3;
4312
4313 /* Inform PGM. */
4314 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4315 {
4316 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4317 AssertRCReturn(rc, rc);
4318 /* ignore informational status codes */
4319 }
4320 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4321 }
4322
4323 /*
4324 * Switch LDTR for the new task.
4325 */
4326 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4327 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4328 else
4329 {
4330 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4331
4332 IEMSELDESC DescNewLdt;
4333 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4334 if (rcStrict != VINF_SUCCESS)
4335 {
4336 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4337 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4338 return rcStrict;
4339 }
4340 if ( !DescNewLdt.Legacy.Gen.u1Present
4341 || DescNewLdt.Legacy.Gen.u1DescType
4342 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4343 {
4344 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4345 uNewLdt, DescNewLdt.Legacy.u));
4346 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4347 }
4348
4349 pCtx->ldtr.ValidSel = uNewLdt;
4350 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4351 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4352 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4353 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4354 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4355 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4356 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4357 }
4358
4359 IEMSELDESC DescSS;
4360 if (IEM_IS_V86_MODE(pVCpu))
4361 {
4362 pVCpu->iem.s.uCpl = 3;
4363 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4364 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4365 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4366 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4367 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4368 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4369
4370 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4371 DescSS.Legacy.u = 0;
4372 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4373 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4374 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4375 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4376 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4377 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4378 DescSS.Legacy.Gen.u2Dpl = 3;
4379 }
4380 else
4381 {
4382 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4383
4384 /*
4385 * Load the stack segment for the new task.
4386 */
4387 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4388 {
4389 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4390 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4391 }
4392
4393 /* Fetch the descriptor. */
4394 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4395 if (rcStrict != VINF_SUCCESS)
4396 {
4397 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4398 VBOXSTRICTRC_VAL(rcStrict)));
4399 return rcStrict;
4400 }
4401
4402 /* SS must be a data segment and writable. */
4403 if ( !DescSS.Legacy.Gen.u1DescType
4404 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4405 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4406 {
4407 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4408 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4409 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4410 }
4411
4412 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4413 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4414 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4415 {
4416 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4417 uNewCpl));
4418 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4419 }
4420
4421 /* Is it there? */
4422 if (!DescSS.Legacy.Gen.u1Present)
4423 {
4424 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4425 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4426 }
4427
4428 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4429 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4430
4431 /* Set the accessed bit before committing the result into SS. */
4432 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4433 {
4434 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4435 if (rcStrict != VINF_SUCCESS)
4436 return rcStrict;
4437 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4438 }
4439
4440 /* Commit SS. */
4441 pCtx->ss.Sel = uNewSS;
4442 pCtx->ss.ValidSel = uNewSS;
4443 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4444 pCtx->ss.u32Limit = cbLimit;
4445 pCtx->ss.u64Base = u64Base;
4446 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4447 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4448
4449 /* CPL has changed, update IEM before loading rest of segments. */
4450 pVCpu->iem.s.uCpl = uNewCpl;
4451
4452 /*
4453 * Load the data segments for the new task.
4454 */
4455 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4456 if (rcStrict != VINF_SUCCESS)
4457 return rcStrict;
4458 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4459 if (rcStrict != VINF_SUCCESS)
4460 return rcStrict;
4461 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4462 if (rcStrict != VINF_SUCCESS)
4463 return rcStrict;
4464 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4465 if (rcStrict != VINF_SUCCESS)
4466 return rcStrict;
4467
4468 /*
4469 * Load the code segment for the new task.
4470 */
4471 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4472 {
4473 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4474 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4475 }
4476
4477 /* Fetch the descriptor. */
4478 IEMSELDESC DescCS;
4479 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4480 if (rcStrict != VINF_SUCCESS)
4481 {
4482 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4483 return rcStrict;
4484 }
4485
4486 /* CS must be a code segment. */
4487 if ( !DescCS.Legacy.Gen.u1DescType
4488 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4489 {
4490 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4491 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4492 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4493 }
4494
4495 /* For conforming CS, DPL must be less than or equal to the RPL. */
4496 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4497 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4498 {
4499 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4500 DescCS.Legacy.Gen.u2Dpl));
4501 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4502 }
4503
4504 /* For non-conforming CS, DPL must match RPL. */
4505 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4506 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4507 {
4508 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4509 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4510 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4511 }
4512
4513 /* Is it there? */
4514 if (!DescCS.Legacy.Gen.u1Present)
4515 {
4516 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4517 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4518 }
4519
4520 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4521 u64Base = X86DESC_BASE(&DescCS.Legacy);
4522
4523 /* Set the accessed bit before committing the result into CS. */
4524 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4525 {
4526 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4527 if (rcStrict != VINF_SUCCESS)
4528 return rcStrict;
4529 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4530 }
4531
4532 /* Commit CS. */
4533 pCtx->cs.Sel = uNewCS;
4534 pCtx->cs.ValidSel = uNewCS;
4535 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4536 pCtx->cs.u32Limit = cbLimit;
4537 pCtx->cs.u64Base = u64Base;
4538 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4539 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4540 }
4541
4542 /** @todo Debug trap. */
4543 if (fIsNewTSS386 && fNewDebugTrap)
4544 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4545
4546 /*
4547 * Construct the error code masks based on what caused this task switch.
4548 * See Intel Instruction reference for INT.
4549 */
4550 uint16_t uExt;
4551 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4552 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4553 {
4554 uExt = 1;
4555 }
4556 else
4557 uExt = 0;
4558
4559 /*
4560 * Push any error code on to the new stack.
4561 */
4562 if (fFlags & IEM_XCPT_FLAGS_ERR)
4563 {
4564 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4565 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4566 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4567
4568 /* Check that there is sufficient space on the stack. */
4569 /** @todo Factor out segment limit checking for normal/expand down segments
4570 * into a separate function. */
4571 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4572 {
4573 if ( pCtx->esp - 1 > cbLimitSS
4574 || pCtx->esp < cbStackFrame)
4575 {
4576 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4577 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4578 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4579 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4580 }
4581 }
4582 else
4583 {
4584 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4585 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4586 {
4587 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4588 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4589 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4590 }
4591 }
4592
4593
4594 if (fIsNewTSS386)
4595 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4596 else
4597 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4598 if (rcStrict != VINF_SUCCESS)
4599 {
4600 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4601 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4602 return rcStrict;
4603 }
4604 }
4605
4606 /* Check the new EIP against the new CS limit. */
4607 if (pCtx->eip > pCtx->cs.u32Limit)
4608 {
4609 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4610 pCtx->eip, pCtx->cs.u32Limit));
4611 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4612 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4613 }
4614
4615 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4616 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4617}
4618
4619
4620/**
4621 * Implements exceptions and interrupts for protected mode.
4622 *
4623 * @returns VBox strict status code.
4624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4625 * @param pCtx The CPU context.
4626 * @param cbInstr The number of bytes to offset rIP by in the return
4627 * address.
4628 * @param u8Vector The interrupt / exception vector number.
4629 * @param fFlags The flags.
4630 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4631 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4632 */
4633IEM_STATIC VBOXSTRICTRC
4634iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4635 PCPUMCTX pCtx,
4636 uint8_t cbInstr,
4637 uint8_t u8Vector,
4638 uint32_t fFlags,
4639 uint16_t uErr,
4640 uint64_t uCr2)
4641{
4642 /*
4643 * Read the IDT entry.
4644 */
4645 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4646 {
4647 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4648 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4649 }
4650 X86DESC Idte;
4651 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4652 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4653 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4654 return rcStrict;
4655 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4656 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4657 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4658
4659 /*
4660 * Check the descriptor type, DPL and such.
4661 * ASSUMES this is done in the same order as described for call-gate calls.
4662 */
4663 if (Idte.Gate.u1DescType)
4664 {
4665 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4666 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4667 }
4668 bool fTaskGate = false;
4669 uint8_t f32BitGate = true;
4670 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4671 switch (Idte.Gate.u4Type)
4672 {
4673 case X86_SEL_TYPE_SYS_UNDEFINED:
4674 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4675 case X86_SEL_TYPE_SYS_LDT:
4676 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4677 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4678 case X86_SEL_TYPE_SYS_UNDEFINED2:
4679 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4680 case X86_SEL_TYPE_SYS_UNDEFINED3:
4681 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4682 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4683 case X86_SEL_TYPE_SYS_UNDEFINED4:
4684 {
4685 /** @todo check what actually happens when the type is wrong...
4686 * esp. call gates. */
4687 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4688 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4689 }
4690
4691 case X86_SEL_TYPE_SYS_286_INT_GATE:
4692 f32BitGate = false;
4693 RT_FALL_THRU();
4694 case X86_SEL_TYPE_SYS_386_INT_GATE:
4695 fEflToClear |= X86_EFL_IF;
4696 break;
4697
4698 case X86_SEL_TYPE_SYS_TASK_GATE:
4699 fTaskGate = true;
4700#ifndef IEM_IMPLEMENTS_TASKSWITCH
4701 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4702#endif
4703 break;
4704
4705 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4706 f32BitGate = false;
4707 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4708 break;
4709
4710 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4711 }
4712
4713 /* Check DPL against CPL if applicable. */
4714 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4715 {
4716 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4717 {
4718 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4719 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4720 }
4721 }
4722
4723 /* Is it there? */
4724 if (!Idte.Gate.u1Present)
4725 {
4726 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4727 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4728 }
4729
4730 /* Is it a task-gate? */
4731 if (fTaskGate)
4732 {
4733 /*
4734 * Construct the error code masks based on what caused this task switch.
4735 * See Intel Instruction reference for INT.
4736 */
4737 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4738 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4739 RTSEL SelTSS = Idte.Gate.u16Sel;
4740
4741 /*
4742 * Fetch the TSS descriptor in the GDT.
4743 */
4744 IEMSELDESC DescTSS;
4745 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4746 if (rcStrict != VINF_SUCCESS)
4747 {
4748 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4749 VBOXSTRICTRC_VAL(rcStrict)));
4750 return rcStrict;
4751 }
4752
4753 /* The TSS descriptor must be a system segment and be available (not busy). */
4754 if ( DescTSS.Legacy.Gen.u1DescType
4755 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4756 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4757 {
4758 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4759 u8Vector, SelTSS, DescTSS.Legacy.au64));
4760 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4761 }
4762
4763 /* The TSS must be present. */
4764 if (!DescTSS.Legacy.Gen.u1Present)
4765 {
4766 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4767 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4768 }
4769
4770 /* Do the actual task switch. */
4771 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4772 }
4773
4774 /* A null CS is bad. */
4775 RTSEL NewCS = Idte.Gate.u16Sel;
4776 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4777 {
4778 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4779 return iemRaiseGeneralProtectionFault0(pVCpu);
4780 }
4781
4782 /* Fetch the descriptor for the new CS. */
4783 IEMSELDESC DescCS;
4784 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4785 if (rcStrict != VINF_SUCCESS)
4786 {
4787 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4788 return rcStrict;
4789 }
4790
4791 /* Must be a code segment. */
4792 if (!DescCS.Legacy.Gen.u1DescType)
4793 {
4794 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4795 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4796 }
4797 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4798 {
4799 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4800 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4801 }
4802
4803 /* Don't allow lowering the privilege level. */
4804 /** @todo Does the lowering of privileges apply to software interrupts
4805 * only? This has bearings on the more-privileged or
4806 * same-privilege stack behavior further down. A testcase would
4807 * be nice. */
4808 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4809 {
4810 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4811 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4812 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4813 }
4814
4815 /* Make sure the selector is present. */
4816 if (!DescCS.Legacy.Gen.u1Present)
4817 {
4818 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4819 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4820 }
4821
4822 /* Check the new EIP against the new CS limit. */
4823 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4824 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4825 ? Idte.Gate.u16OffsetLow
4826 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4827 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4828 if (uNewEip > cbLimitCS)
4829 {
4830 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4831 u8Vector, uNewEip, cbLimitCS, NewCS));
4832 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4833 }
4834 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4835
4836 /* Calc the flag image to push. */
4837 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4838 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4839 fEfl &= ~X86_EFL_RF;
4840 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4841 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4842
4843 /* From V8086 mode only go to CPL 0. */
4844 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4845 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4846 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4847 {
4848 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4849 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4850 }
4851
4852 /*
4853 * If the privilege level changes, we need to get a new stack from the TSS.
4854     * This in turn means validating the new SS and ESP...
4855 */
4856 if (uNewCpl != pVCpu->iem.s.uCpl)
4857 {
4858 RTSEL NewSS;
4859 uint32_t uNewEsp;
4860 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4861 if (rcStrict != VINF_SUCCESS)
4862 return rcStrict;
4863
4864 IEMSELDESC DescSS;
4865 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4866 if (rcStrict != VINF_SUCCESS)
4867 return rcStrict;
4868 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4869 if (!DescSS.Legacy.Gen.u1DefBig)
4870 {
4871 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4872 uNewEsp = (uint16_t)uNewEsp;
4873 }
4874
4875 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4876
4877 /* Check that there is sufficient space for the stack frame. */
4878 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
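        /* Frame layout: EIP, CS, EFLAGS, old ESP and old SS (5 words), plus an
           optional error code, plus ES, DS, FS and GS when coming from V8086 mode;
           the shift by f32BitGate doubles the 16-bit sizes for 32-bit gates. */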
4879 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4880 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4881 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4882
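        /* For a normal (expand-up) SS the valid offsets are [0, limit]; for an
           expand-down SS they are (limit, 0xffff] or (limit, 0xffffffff] depending
           on the D/B bit, hence the two different checks below. */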
4883 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4884 {
4885 if ( uNewEsp - 1 > cbLimitSS
4886 || uNewEsp < cbStackFrame)
4887 {
4888 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4889 u8Vector, NewSS, uNewEsp, cbStackFrame));
4890 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4891 }
4892 }
4893 else
4894 {
4895 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4896 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4897 {
4898 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4899 u8Vector, NewSS, uNewEsp, cbStackFrame));
4900 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4901 }
4902 }
4903
4904 /*
4905 * Start making changes.
4906 */
4907
4908 /* Set the new CPL so that stack accesses use it. */
4909 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4910 pVCpu->iem.s.uCpl = uNewCpl;
4911
4912 /* Create the stack frame. */
4913 RTPTRUNION uStackFrame;
4914 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4915 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4916 if (rcStrict != VINF_SUCCESS)
4917 return rcStrict;
4918 void * const pvStackFrame = uStackFrame.pv;
4919 if (f32BitGate)
4920 {
4921 if (fFlags & IEM_XCPT_FLAGS_ERR)
4922 *uStackFrame.pu32++ = uErr;
4923 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4924 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4925 uStackFrame.pu32[2] = fEfl;
4926 uStackFrame.pu32[3] = pCtx->esp;
4927 uStackFrame.pu32[4] = pCtx->ss.Sel;
4928 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4929 if (fEfl & X86_EFL_VM)
4930 {
4931 uStackFrame.pu32[1] = pCtx->cs.Sel;
4932 uStackFrame.pu32[5] = pCtx->es.Sel;
4933 uStackFrame.pu32[6] = pCtx->ds.Sel;
4934 uStackFrame.pu32[7] = pCtx->fs.Sel;
4935 uStackFrame.pu32[8] = pCtx->gs.Sel;
4936 }
4937 }
4938 else
4939 {
4940 if (fFlags & IEM_XCPT_FLAGS_ERR)
4941 *uStackFrame.pu16++ = uErr;
4942 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4943 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4944 uStackFrame.pu16[2] = fEfl;
4945 uStackFrame.pu16[3] = pCtx->sp;
4946 uStackFrame.pu16[4] = pCtx->ss.Sel;
4947 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4948 if (fEfl & X86_EFL_VM)
4949 {
4950 uStackFrame.pu16[1] = pCtx->cs.Sel;
4951 uStackFrame.pu16[5] = pCtx->es.Sel;
4952 uStackFrame.pu16[6] = pCtx->ds.Sel;
4953 uStackFrame.pu16[7] = pCtx->fs.Sel;
4954 uStackFrame.pu16[8] = pCtx->gs.Sel;
4955 }
4956 }
4957 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4958 if (rcStrict != VINF_SUCCESS)
4959 return rcStrict;
4960
4961 /* Mark the selectors 'accessed' (hope this is the correct time). */
4962        /** @todo testcase: exactly _when_ are the accessed bits set - before or
4963 * after pushing the stack frame? (Write protect the gdt + stack to
4964 * find out.) */
4965 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4966 {
4967 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4968 if (rcStrict != VINF_SUCCESS)
4969 return rcStrict;
4970 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4971 }
4972
4973 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4974 {
4975 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4976 if (rcStrict != VINF_SUCCESS)
4977 return rcStrict;
4978 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4979 }
4980
4981 /*
4982     * Start committing the register changes (joins with the DPL=CPL branch).
4983 */
4984 pCtx->ss.Sel = NewSS;
4985 pCtx->ss.ValidSel = NewSS;
4986 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4987 pCtx->ss.u32Limit = cbLimitSS;
4988 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4989 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4990 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4991 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4992 * SP is loaded).
4993 * Need to check the other combinations too:
4994 * - 16-bit TSS, 32-bit handler
4995 * - 32-bit TSS, 16-bit handler */
4996 if (!pCtx->ss.Attr.n.u1DefBig)
4997 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4998 else
4999 pCtx->rsp = uNewEsp - cbStackFrame;
5000
5001 if (fEfl & X86_EFL_VM)
5002 {
5003 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
5004 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5005 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5006 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5007 }
5008 }
5009 /*
5010 * Same privilege, no stack change and smaller stack frame.
5011 */
5012 else
5013 {
5014 uint64_t uNewRsp;
5015 RTPTRUNION uStackFrame;
5016 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5017 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5018 if (rcStrict != VINF_SUCCESS)
5019 return rcStrict;
5020 void * const pvStackFrame = uStackFrame.pv;
5021
5022 if (f32BitGate)
5023 {
5024 if (fFlags & IEM_XCPT_FLAGS_ERR)
5025 *uStackFrame.pu32++ = uErr;
5026 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5027 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5028 uStackFrame.pu32[2] = fEfl;
5029 }
5030 else
5031 {
5032 if (fFlags & IEM_XCPT_FLAGS_ERR)
5033 *uStackFrame.pu16++ = uErr;
5034 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5035 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5036 uStackFrame.pu16[2] = fEfl;
5037 }
5038 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5039 if (rcStrict != VINF_SUCCESS)
5040 return rcStrict;
5041
5042 /* Mark the CS selector as 'accessed'. */
5043 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5044 {
5045 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5046 if (rcStrict != VINF_SUCCESS)
5047 return rcStrict;
5048 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5049 }
5050
5051 /*
5052 * Start committing the register changes (joins with the other branch).
5053 */
5054 pCtx->rsp = uNewRsp;
5055 }
5056
5057 /* ... register committing continues. */
5058 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5059 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5060 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5061 pCtx->cs.u32Limit = cbLimitCS;
5062 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5063 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5064
5065 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5066 fEfl &= ~fEflToClear;
5067 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5068
5069 if (fFlags & IEM_XCPT_FLAGS_CR2)
5070 pCtx->cr2 = uCr2;
5071
5072 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5073 iemRaiseXcptAdjustState(pCtx, u8Vector);
5074
5075 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5076}
5077
5078
5079/**
5080 * Implements exceptions and interrupts for long mode.
5081 *
5082 * @returns VBox strict status code.
5083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5084 * @param pCtx The CPU context.
5085 * @param cbInstr The number of bytes to offset rIP by in the return
5086 * address.
5087 * @param u8Vector The interrupt / exception vector number.
5088 * @param fFlags The flags.
5089 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5090 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5091 */
5092IEM_STATIC VBOXSTRICTRC
5093iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5094 PCPUMCTX pCtx,
5095 uint8_t cbInstr,
5096 uint8_t u8Vector,
5097 uint32_t fFlags,
5098 uint16_t uErr,
5099 uint64_t uCr2)
5100{
5101 /*
5102 * Read the IDT entry.
5103 */
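    /* Note: in long mode the IDT entries are 16 bytes each, hence the shift by 4
       and the two 8-byte system fetches below. */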
5104 uint16_t offIdt = (uint16_t)u8Vector << 4;
5105 if (pCtx->idtr.cbIdt < offIdt + 7)
5106 {
5107 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5108 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5109 }
5110 X86DESC64 Idte;
5111 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5112 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5113 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5114 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5115 return rcStrict;
5116 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5117 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5118 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5119
5120 /*
5121 * Check the descriptor type, DPL and such.
5122 * ASSUMES this is done in the same order as described for call-gate calls.
5123 */
5124 if (Idte.Gate.u1DescType)
5125 {
5126 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5127 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5128 }
5129 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5130 switch (Idte.Gate.u4Type)
5131 {
5132 case AMD64_SEL_TYPE_SYS_INT_GATE:
5133 fEflToClear |= X86_EFL_IF;
5134 break;
5135 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5136 break;
5137
5138 default:
5139 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5140 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5141 }
5142
5143 /* Check DPL against CPL if applicable. */
5144 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5145 {
5146 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5147 {
5148 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5149 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5150 }
5151 }
5152
5153 /* Is it there? */
5154 if (!Idte.Gate.u1Present)
5155 {
5156 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5157 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5158 }
5159
5160 /* A null CS is bad. */
5161 RTSEL NewCS = Idte.Gate.u16Sel;
5162 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5163 {
5164 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5165 return iemRaiseGeneralProtectionFault0(pVCpu);
5166 }
5167
5168 /* Fetch the descriptor for the new CS. */
5169 IEMSELDESC DescCS;
5170 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5171 if (rcStrict != VINF_SUCCESS)
5172 {
5173 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5174 return rcStrict;
5175 }
5176
5177 /* Must be a 64-bit code segment. */
5178 if (!DescCS.Long.Gen.u1DescType)
5179 {
5180 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5181 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5182 }
5183 if ( !DescCS.Long.Gen.u1Long
5184 || DescCS.Long.Gen.u1DefBig
5185 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5186 {
5187 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5188 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5189 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5190 }
5191
5192 /* Don't allow lowering the privilege level. For non-conforming CS
5193 selectors, the CS.DPL sets the privilege level the trap/interrupt
5194 handler runs at. For conforming CS selectors, the CPL remains
5195 unchanged, but the CS.DPL must be <= CPL. */
5196 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5197 * when CPU in Ring-0. Result \#GP? */
5198 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5199 {
5200 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5201 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5202 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5203 }
5204
5205
5206 /* Make sure the selector is present. */
5207 if (!DescCS.Legacy.Gen.u1Present)
5208 {
5209 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5210 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5211 }
5212
5213 /* Check that the new RIP is canonical. */
5214 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5215 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5216 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5217 if (!IEM_IS_CANONICAL(uNewRip))
5218 {
5219 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5220 return iemRaiseGeneralProtectionFault0(pVCpu);
5221 }
5222
5223 /*
5224 * If the privilege level changes or if the IST isn't zero, we need to get
5225 * a new stack from the TSS.
5226 */
5227 uint64_t uNewRsp;
5228 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5229 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5230 if ( uNewCpl != pVCpu->iem.s.uCpl
5231 || Idte.Gate.u3IST != 0)
5232 {
5233 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5234 if (rcStrict != VINF_SUCCESS)
5235 return rcStrict;
5236 }
5237 else
5238 uNewRsp = pCtx->rsp;
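    /* In 64-bit mode the CPU aligns the stack on a 16-byte boundary before
       pushing the interrupt/exception frame, hence the masking below. */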
5239 uNewRsp &= ~(uint64_t)0xf;
5240
5241 /*
5242 * Calc the flag image to push.
5243 */
5244 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5245 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5246 fEfl &= ~X86_EFL_RF;
5247 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5248 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5249
5250 /*
5251 * Start making changes.
5252 */
5253 /* Set the new CPL so that stack accesses use it. */
5254 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5255 pVCpu->iem.s.uCpl = uNewCpl;
5256
5257 /* Create the stack frame. */
5258 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
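    /* The 64-bit frame is always SS, RSP, RFLAGS, CS and RIP plus an optional
       error code (5 or 6 qwords), regardless of the interrupted code's bitness. */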
5259 RTPTRUNION uStackFrame;
5260 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5261 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5262 if (rcStrict != VINF_SUCCESS)
5263 return rcStrict;
5264 void * const pvStackFrame = uStackFrame.pv;
5265
5266 if (fFlags & IEM_XCPT_FLAGS_ERR)
5267 *uStackFrame.pu64++ = uErr;
5268 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5269 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5270 uStackFrame.pu64[2] = fEfl;
5271 uStackFrame.pu64[3] = pCtx->rsp;
5272 uStackFrame.pu64[4] = pCtx->ss.Sel;
5273 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5274 if (rcStrict != VINF_SUCCESS)
5275 return rcStrict;
5276
5277    /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5278    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5279 * after pushing the stack frame? (Write protect the gdt + stack to
5280 * find out.) */
5281 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5282 {
5283 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5284 if (rcStrict != VINF_SUCCESS)
5285 return rcStrict;
5286 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5287 }
5288
5289 /*
5290     * Start committing the register changes.
5291 */
5292 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5293 * hidden registers when interrupting 32-bit or 16-bit code! */
5294 if (uNewCpl != uOldCpl)
5295 {
5296 pCtx->ss.Sel = 0 | uNewCpl;
5297 pCtx->ss.ValidSel = 0 | uNewCpl;
5298 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5299 pCtx->ss.u32Limit = UINT32_MAX;
5300 pCtx->ss.u64Base = 0;
5301 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5302 }
5303 pCtx->rsp = uNewRsp - cbStackFrame;
5304 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5305 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5306 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5307 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5308 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5309 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5310 pCtx->rip = uNewRip;
5311
5312 fEfl &= ~fEflToClear;
5313 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5314
5315 if (fFlags & IEM_XCPT_FLAGS_CR2)
5316 pCtx->cr2 = uCr2;
5317
5318 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5319 iemRaiseXcptAdjustState(pCtx, u8Vector);
5320
5321 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5322}
5323
5324
5325/**
5326 * Implements exceptions and interrupts.
5327 *
5328 * All exceptions and interrupts go through this function!
5329 *
5330 * @returns VBox strict status code.
5331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5332 * @param cbInstr The number of bytes to offset rIP by in the return
5333 * address.
5334 * @param u8Vector The interrupt / exception vector number.
5335 * @param fFlags The flags.
5336 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5337 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5338 */
5339DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5340iemRaiseXcptOrInt(PVMCPU pVCpu,
5341 uint8_t cbInstr,
5342 uint8_t u8Vector,
5343 uint32_t fFlags,
5344 uint16_t uErr,
5345 uint64_t uCr2)
5346{
5347 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5348#ifdef IN_RING0
5349 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5350 AssertRCReturn(rc, rc);
5351#endif
5352
5353#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5354 /*
5355 * Flush prefetch buffer
5356 */
5357 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5358#endif
5359
5360 /*
5361 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5362 */
5363 if ( pCtx->eflags.Bits.u1VM
5364 && pCtx->eflags.Bits.u2IOPL != 3
5365 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5366 && (pCtx->cr0 & X86_CR0_PE) )
5367 {
5368 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5369 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5370 u8Vector = X86_XCPT_GP;
5371 uErr = 0;
5372 }
5373#ifdef DBGFTRACE_ENABLED
5374 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5375 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5376 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5377#endif
5378
5379#ifdef VBOX_WITH_NESTED_HWVIRT
5380 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
5381 {
5382 /*
5383 * If the event is being injected as part of VMRUN, it isn't subject to event
5384 * intercepts in the nested-guest. However, secondary exceptions that occur
5385 * during injection of any event -are- subject to exception intercepts.
5386 * See AMD spec. 15.20 "Event Injection".
5387 */
5388 if (!pCtx->hwvirt.svm.fInterceptEvents)
5389 pCtx->hwvirt.svm.fInterceptEvents = 1;
5390 else
5391 {
5392 /*
5393 * Check and handle if the event being raised is intercepted.
5394 */
5395 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5396 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5397 return rcStrict0;
5398 }
5399 }
5400#endif /* VBOX_WITH_NESTED_HWVIRT */
5401
5402 /*
5403 * Do recursion accounting.
5404 */
5405 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5406 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5407 if (pVCpu->iem.s.cXcptRecursions == 0)
5408 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5409 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5410 else
5411 {
5412 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5413 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5414 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5415
5416 if (pVCpu->iem.s.cXcptRecursions >= 3)
5417 {
5418#ifdef DEBUG_bird
5419 AssertFailed();
5420#endif
5421 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5422 }
5423
5424 /*
5425 * Evaluate the sequence of recurring events.
5426 */
5427 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5428 NULL /* pXcptRaiseInfo */);
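        /* Example: a #NP raised while delivering a #GP is classified as a double
           fault, whereas a #PF raised while delivering an external interrupt is
           simply delivered as the current exception. */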
5429 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5430 { /* likely */ }
5431 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5432 {
5433 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5434 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5435 u8Vector = X86_XCPT_DF;
5436 uErr = 0;
5437 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5438 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5439 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + X86_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5440 }
5441 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5442 {
5443 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5444 return iemInitiateCpuShutdown(pVCpu);
5445 }
5446 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5447 {
5448 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5449 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5450 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5451 return VERR_EM_GUEST_CPU_HANG;
5452 }
5453 else
5454 {
5455 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5456 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5457 return VERR_IEM_IPE_9;
5458 }
5459
5460 /*
5461             * The 'EXT' bit is set when an exception occurs during delivery of an external
5462             * event (such as an interrupt or an earlier exception)[1]. The privileged software
5463             * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5464             * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5465 *
5466 * [1] - Intel spec. 6.13 "Error Code"
5467 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5468 * [3] - Intel Instruction reference for INT n.
5469 */
5470 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5471 && (fFlags & IEM_XCPT_FLAGS_ERR)
5472 && u8Vector != X86_XCPT_PF
5473 && u8Vector != X86_XCPT_DF)
5474 {
5475 uErr |= X86_TRAP_ERR_EXTERNAL;
5476 }
5477 }
5478
5479 pVCpu->iem.s.cXcptRecursions++;
5480 pVCpu->iem.s.uCurXcpt = u8Vector;
5481 pVCpu->iem.s.fCurXcpt = fFlags;
5482 pVCpu->iem.s.uCurXcptErr = uErr;
5483 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5484
5485 /*
5486 * Extensive logging.
5487 */
5488#if defined(LOG_ENABLED) && defined(IN_RING3)
5489 if (LogIs3Enabled())
5490 {
5491 PVM pVM = pVCpu->CTX_SUFF(pVM);
5492 char szRegs[4096];
5493 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5494 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5495 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5496 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5497 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5498 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5499 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5500 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5501 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5502 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5503 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5504 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5505 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5506 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5507 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5508 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5509 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5510 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5511 " efer=%016VR{efer}\n"
5512 " pat=%016VR{pat}\n"
5513 " sf_mask=%016VR{sf_mask}\n"
5514 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5515 " lstar=%016VR{lstar}\n"
5516 " star=%016VR{star} cstar=%016VR{cstar}\n"
5517 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5518 );
5519
5520 char szInstr[256];
5521 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5522 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5523 szInstr, sizeof(szInstr), NULL);
5524 Log3(("%s%s\n", szRegs, szInstr));
5525 }
5526#endif /* LOG_ENABLED */
5527
5528 /*
5529 * Call the mode specific worker function.
5530 */
5531 VBOXSTRICTRC rcStrict;
5532 if (!(pCtx->cr0 & X86_CR0_PE))
5533 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5534 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5535 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5536 else
5537 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5538
5539 /* Flush the prefetch buffer. */
5540#ifdef IEM_WITH_CODE_TLB
5541 pVCpu->iem.s.pbInstrBuf = NULL;
5542#else
5543 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5544#endif
5545
5546 /*
5547 * Unwind.
5548 */
5549 pVCpu->iem.s.cXcptRecursions--;
5550 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5551 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5552 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5553 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5554 return rcStrict;
5555}
5556
5557#ifdef IEM_WITH_SETJMP
5558/**
5559 * See iemRaiseXcptOrInt. Will not return.
5560 */
5561IEM_STATIC DECL_NO_RETURN(void)
5562iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5563 uint8_t cbInstr,
5564 uint8_t u8Vector,
5565 uint32_t fFlags,
5566 uint16_t uErr,
5567 uint64_t uCr2)
5568{
5569 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5570 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5571}
5572#endif
5573
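/*
 * For reference, a minimal standalone sketch of how the exception error codes
 * used by the iemRaiseXxx helpers below are composed.  It assumes the usual
 * IA-32 error code layout (bit 0 = EXT, bit 1 = IDT, bit 2 = TI, selector or
 * vector index from bit 3), which is what the X86_TRAP_ERR_* constants are
 * expected to encode; the helper name itself is made up for illustration.
 */
#if 0 /* illustrative sketch only, not built */
# include <stdint.h>

/** Builds an IDT-relative error code, e.g. vector 0x0e with EXT set -> 0x73. */
static uint16_t iemSketchMakeIdtErrCode(uint8_t bVector, int fExternal)
{
    return (uint16_t)((uint16_t)bVector << 3)           /* index */
         | UINT16_C(0x2)                                /* IDT */
         | (fExternal ? UINT16_C(0x1) : UINT16_C(0x0)); /* EXT */
}
#endif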
5574
5575/** \#DE - 00. */
5576DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5577{
5578 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5579}
5580
5581
5582/** \#DB - 01.
5583 * @note This automatically clears DR7.GD. */
5584DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5585{
5586 /** @todo set/clear RF. */
5587 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5588 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5589}
5590
5591
5592/** \#BR - 05. */
5593DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5594{
5595 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5596}
5597
5598
5599/** \#UD - 06. */
5600DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5601{
5602 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5603}
5604
5605
5606/** \#NM - 07. */
5607DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5608{
5609 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5610}
5611
5612
5613/** \#TS(err) - 0a. */
5614DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5615{
5616 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5617}
5618
5619
5620/** \#TS(tr) - 0a. */
5621DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5622{
5623 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5624 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5625}
5626
5627
5628/** \#TS(0) - 0a. */
5629DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5630{
5631 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5632 0, 0);
5633}
5634
5635
5636/** \#TS(err) - 0a. */
5637DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5638{
5639 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5640 uSel & X86_SEL_MASK_OFF_RPL, 0);
5641}
5642
5643
5644/** \#NP(err) - 0b. */
5645DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5646{
5647 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5648}
5649
5650
5651/** \#NP(sel) - 0b. */
5652DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5653{
5654 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5655 uSel & ~X86_SEL_RPL, 0);
5656}
5657
5658
5659/** \#SS(seg) - 0c. */
5660DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5661{
5662 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5663 uSel & ~X86_SEL_RPL, 0);
5664}
5665
5666
5667/** \#SS(err) - 0c. */
5668DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5669{
5670 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5671}
5672
5673
5674/** \#GP(n) - 0d. */
5675DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5676{
5677 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5678}
5679
5680
5681/** \#GP(0) - 0d. */
5682DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5683{
5684 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5685}
5686
5687#ifdef IEM_WITH_SETJMP
5688/** \#GP(0) - 0d. */
5689DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5690{
5691 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5692}
5693#endif
5694
5695
5696/** \#GP(sel) - 0d. */
5697DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5698{
5699 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5700 Sel & ~X86_SEL_RPL, 0);
5701}
5702
5703
5704/** \#GP(0) - 0d. */
5705DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5706{
5707 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5708}
5709
5710
5711/** \#GP(sel) - 0d. */
5712DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5713{
5714 NOREF(iSegReg); NOREF(fAccess);
5715 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5716 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5717}
5718
5719#ifdef IEM_WITH_SETJMP
5720/** \#GP(sel) - 0d, longjmp. */
5721DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5722{
5723 NOREF(iSegReg); NOREF(fAccess);
5724 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5725 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5726}
5727#endif
5728
5729/** \#GP(sel) - 0d. */
5730DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5731{
5732 NOREF(Sel);
5733 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5734}
5735
5736#ifdef IEM_WITH_SETJMP
5737/** \#GP(sel) - 0d, longjmp. */
5738DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5739{
5740 NOREF(Sel);
5741 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5742}
5743#endif
5744
5745
5746/** \#GP(sel) - 0d. */
5747DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5748{
5749 NOREF(iSegReg); NOREF(fAccess);
5750 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5751}
5752
5753#ifdef IEM_WITH_SETJMP
5754/** \#GP(sel) - 0d, longjmp. */
5755DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5756 uint32_t fAccess)
5757{
5758 NOREF(iSegReg); NOREF(fAccess);
5759 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5760}
5761#endif
5762
5763
5764/** \#PF(n) - 0e. */
5765DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5766{
5767 uint16_t uErr;
5768 switch (rc)
5769 {
5770 case VERR_PAGE_NOT_PRESENT:
5771 case VERR_PAGE_TABLE_NOT_PRESENT:
5772 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5773 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5774 uErr = 0;
5775 break;
5776
5777 default:
5778 AssertMsgFailed(("%Rrc\n", rc));
5779 RT_FALL_THRU();
5780 case VERR_ACCESS_DENIED:
5781 uErr = X86_TRAP_PF_P;
5782 break;
5783
5784 /** @todo reserved */
5785 }
5786
5787 if (pVCpu->iem.s.uCpl == 3)
5788 uErr |= X86_TRAP_PF_US;
5789
5790 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5791 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5792 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5793 uErr |= X86_TRAP_PF_ID;
5794
5795#if 0 /* This is so much nonsense, really. Why was it done like that? */
5796 /* Note! RW access callers reporting a WRITE protection fault, will clear
5797 the READ flag before calling. So, read-modify-write accesses (RW)
5798 can safely be reported as READ faults. */
5799 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5800 uErr |= X86_TRAP_PF_RW;
5801#else
5802 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5803 {
5804 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5805 uErr |= X86_TRAP_PF_RW;
5806 }
5807#endif
5808
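    /* Example: a ring-3 write to a present read-only page ends up here with
       uErr = P | US | RW = 0x7, while a supervisor instruction fetch from a
       non-present page with NX enabled yields uErr = ID = 0x10. */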
5809 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5810 uErr, GCPtrWhere);
5811}
5812
5813#ifdef IEM_WITH_SETJMP
5814/** \#PF(n) - 0e, longjmp. */
5815IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5816{
5817 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5818}
5819#endif
5820
5821
5822/** \#MF(0) - 10. */
5823DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5824{
5825 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5826}
5827
5828
5829/** \#AC(0) - 11. */
5830DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5831{
5832 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5833}
5834
5835
5836/**
5837 * Macro for calling iemCImplRaiseDivideError().
5838 *
5839 * This enables us to add/remove arguments and force different levels of
5840 * inlining as we wish.
5841 *
5842 * @return Strict VBox status code.
5843 */
5844#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5845IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5846{
5847 NOREF(cbInstr);
5848 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5849}
5850
5851
5852/**
5853 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5854 *
5855 * This enables us to add/remove arguments and force different levels of
5856 * inlining as we wish.
5857 *
5858 * @return Strict VBox status code.
5859 */
5860#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5861IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5862{
5863 NOREF(cbInstr);
5864 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5865}
5866
5867
5868/**
5869 * Macro for calling iemCImplRaiseInvalidOpcode().
5870 *
5871 * This enables us to add/remove arguments and force different levels of
5872 * inlining as we wish.
5873 *
5874 * @return Strict VBox status code.
5875 */
5876#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5877IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5878{
5879 NOREF(cbInstr);
5880 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5881}
5882
5883
5884/** @} */
5885
5886
5887/*
5888 *
5889 * Helper routines.
5890 * Helper routines.
5891 * Helper routines.
5892 *
5893 */
5894
5895/**
5896 * Recalculates the effective operand size.
5897 *
5898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5899 */
5900IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5901{
5902 switch (pVCpu->iem.s.enmCpuMode)
5903 {
5904 case IEMMODE_16BIT:
5905 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5906 break;
5907 case IEMMODE_32BIT:
5908 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5909 break;
5910 case IEMMODE_64BIT:
5911 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5912 {
5913 case 0:
5914 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5915 break;
5916 case IEM_OP_PRF_SIZE_OP:
5917 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5918 break;
5919 case IEM_OP_PRF_SIZE_REX_W:
5920 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5921 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5922 break;
5923 }
5924 break;
5925 default:
5926 AssertFailed();
5927 }
5928}
5929
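/*
 * Quick reference for iemRecalEffOpSize above and iemRecalEffOpSize64Default below:
 *      16-bit mode: default 16, 0x66 prefix selects 32.
 *      32-bit mode: default 32, 0x66 prefix selects 16.
 *      64-bit mode: default 32 (64 for the instructions using the function
 *                   below), 0x66 prefix selects 16, REX.W selects 64 and
 *                   takes precedence over 0x66.
 */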
5930
5931/**
5932 * Sets the default operand size to 64-bit and recalculates the effective
5933 * operand size.
5934 *
5935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5936 */
5937IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5938{
5939 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5940 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5941 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5942 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5943 else
5944 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5945}
5946
5947
5948/*
5949 *
5950 * Common opcode decoders.
5951 * Common opcode decoders.
5952 * Common opcode decoders.
5953 *
5954 */
5955//#include <iprt/mem.h>
5956
5957/**
5958 * Used to add extra details about a stub case.
5959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5960 */
5961IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5962{
5963#if defined(LOG_ENABLED) && defined(IN_RING3)
5964 PVM pVM = pVCpu->CTX_SUFF(pVM);
5965 char szRegs[4096];
5966 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5967 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5968 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5969 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5970 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5971 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5972 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5973 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5974 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5975 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5976 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5977 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5978 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5979 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5980 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5981 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5982 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5983 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5984 " efer=%016VR{efer}\n"
5985 " pat=%016VR{pat}\n"
5986 " sf_mask=%016VR{sf_mask}\n"
5987 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5988 " lstar=%016VR{lstar}\n"
5989 " star=%016VR{star} cstar=%016VR{cstar}\n"
5990 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5991 );
5992
5993 char szInstr[256];
5994 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5995 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5996 szInstr, sizeof(szInstr), NULL);
5997
5998 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5999#else
6000    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
6001#endif
6002}
6003
6004/**
6005 * Complains about a stub.
6006 *
6007 * Providing two versions of this macro, one for daily use and one for use when
6008 * working on IEM.
6009 */
6010#if 0
6011# define IEMOP_BITCH_ABOUT_STUB() \
6012 do { \
6013 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6014 iemOpStubMsg2(pVCpu); \
6015 RTAssertPanic(); \
6016 } while (0)
6017#else
6018# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6019#endif
6020
6021/** Stubs an opcode. */
6022#define FNIEMOP_STUB(a_Name) \
6023 FNIEMOP_DEF(a_Name) \
6024 { \
6025 RT_NOREF_PV(pVCpu); \
6026 IEMOP_BITCH_ABOUT_STUB(); \
6027 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6028 } \
6029 typedef int ignore_semicolon
6030
6031/** Stubs an opcode. */
6032#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6033 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6034 { \
6035 RT_NOREF_PV(pVCpu); \
6036 RT_NOREF_PV(a_Name0); \
6037 IEMOP_BITCH_ABOUT_STUB(); \
6038 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6039 } \
6040 typedef int ignore_semicolon
6041
6042/** Stubs an opcode which currently should raise \#UD. */
6043#define FNIEMOP_UD_STUB(a_Name) \
6044 FNIEMOP_DEF(a_Name) \
6045 { \
6046 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6047 return IEMOP_RAISE_INVALID_OPCODE(); \
6048 } \
6049 typedef int ignore_semicolon
6050
6051/** Stubs an opcode which currently should raise \#UD. */
6052#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6053 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6054 { \
6055 RT_NOREF_PV(pVCpu); \
6056 RT_NOREF_PV(a_Name0); \
6057 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6058 return IEMOP_RAISE_INVALID_OPCODE(); \
6059 } \
6060 typedef int ignore_semicolon
6061
6062
6063
6064/** @name Register Access.
6065 * @{
6066 */
6067
6068/**
6069 * Gets a reference (pointer) to the specified hidden segment register.
6070 *
6071 * @returns Hidden register reference.
6072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6073 * @param iSegReg The segment register.
6074 */
6075IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6076{
6077 Assert(iSegReg < X86_SREG_COUNT);
6078 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6079 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6080
6081#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6082 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6083 { /* likely */ }
6084 else
6085 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6086#else
6087 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6088#endif
6089 return pSReg;
6090}
6091
6092
6093/**
6094 * Ensures that the given hidden segment register is up to date.
6095 *
6096 * @returns Hidden register reference.
6097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6098 * @param pSReg The segment register.
6099 */
6100IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6101{
6102#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6103 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6104 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6105#else
6106 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6107 NOREF(pVCpu);
6108#endif
6109 return pSReg;
6110}
6111
6112
6113/**
6114 * Gets a reference (pointer) to the specified segment register (the selector
6115 * value).
6116 *
6117 * @returns Pointer to the selector variable.
6118 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6119 * @param iSegReg The segment register.
6120 */
6121DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6122{
6123 Assert(iSegReg < X86_SREG_COUNT);
6124 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6125 return &pCtx->aSRegs[iSegReg].Sel;
6126}
6127
6128
6129/**
6130 * Fetches the selector value of a segment register.
6131 *
6132 * @returns The selector value.
6133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6134 * @param iSegReg The segment register.
6135 */
6136DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6137{
6138 Assert(iSegReg < X86_SREG_COUNT);
6139 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6140}
6141
6142
6143/**
6144 * Gets a reference (pointer) to the specified general purpose register.
6145 *
6146 * @returns Register reference.
6147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6148 * @param iReg The general purpose register.
6149 */
6150DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6151{
6152 Assert(iReg < 16);
6153 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6154 return &pCtx->aGRegs[iReg];
6155}
6156
6157
6158/**
6159 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6160 *
6161 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6162 *
6163 * @returns Register reference.
6164 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6165 * @param iReg The register.
6166 */
6167DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6168{
6169 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6170 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6171 {
6172 Assert(iReg < 16);
6173 return &pCtx->aGRegs[iReg].u8;
6174 }
6175 /* high 8-bit register. */
6176 Assert(iReg < 8);
6177 return &pCtx->aGRegs[iReg & 3].bHi;
6178}
6179
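/*
 * Note for iemGRegRefU8 above: without a REX prefix register number 4 selects
 * AH, i.e. the high byte of RAX (aGRegs[0].bHi), whereas with any REX prefix
 * the same encoding selects SPL, i.e. the low byte of RSP (aGRegs[4].u8).
 */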
6180
6181/**
6182 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6183 *
6184 * @returns Register reference.
6185 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6186 * @param iReg The register.
6187 */
6188DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6189{
6190 Assert(iReg < 16);
6191 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6192 return &pCtx->aGRegs[iReg].u16;
6193}
6194
6195
6196/**
6197 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6198 *
6199 * @returns Register reference.
6200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6201 * @param iReg The register.
6202 */
6203DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6204{
6205 Assert(iReg < 16);
6206 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6207 return &pCtx->aGRegs[iReg].u32;
6208}
6209
6210
6211/**
6212 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6213 *
6214 * @returns Register reference.
6215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6216 * @param iReg The register.
6217 */
6218DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6219{
6220 Assert(iReg < 16);
6221 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6222 return &pCtx->aGRegs[iReg].u64;
6223}
6224
6225
6226/**
6227 * Fetches the value of an 8-bit general purpose register.
6228 *
6229 * @returns The register value.
6230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6231 * @param iReg The register.
6232 */
6233DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6234{
6235 return *iemGRegRefU8(pVCpu, iReg);
6236}
6237
6238
6239/**
6240 * Fetches the value of a 16-bit general purpose register.
6241 *
6242 * @returns The register value.
6243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6244 * @param iReg The register.
6245 */
6246DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6247{
6248 Assert(iReg < 16);
6249 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6250}
6251
6252
6253/**
6254 * Fetches the value of a 32-bit general purpose register.
6255 *
6256 * @returns The register value.
6257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6258 * @param iReg The register.
6259 */
6260DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6261{
6262 Assert(iReg < 16);
6263 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6264}
6265
6266
6267/**
6268 * Fetches the value of a 64-bit general purpose register.
6269 *
6270 * @returns The register value.
6271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6272 * @param iReg The register.
6273 */
6274DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6275{
6276 Assert(iReg < 16);
6277 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6278}
6279
6280
6281/**
6282 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6283 *
6284 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6285 * segment limit.
6286 *
 * @returns Strict VBox status code.
6287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6288 * @param offNextInstr The offset of the next instruction.
6289 */
6290IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6291{
6292 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6293 switch (pVCpu->iem.s.enmEffOpSize)
6294 {
6295 case IEMMODE_16BIT:
6296 {
6297 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6298 if ( uNewIp > pCtx->cs.u32Limit
6299 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6300 return iemRaiseGeneralProtectionFault0(pVCpu);
6301 pCtx->rip = uNewIp;
6302 break;
6303 }
6304
6305 case IEMMODE_32BIT:
6306 {
6307 Assert(pCtx->rip <= UINT32_MAX);
6308 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6309
6310 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6311 if (uNewEip > pCtx->cs.u32Limit)
6312 return iemRaiseGeneralProtectionFault0(pVCpu);
6313 pCtx->rip = uNewEip;
6314 break;
6315 }
6316
6317 case IEMMODE_64BIT:
6318 {
6319 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6320
6321 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6322 if (!IEM_IS_CANONICAL(uNewRip))
6323 return iemRaiseGeneralProtectionFault0(pVCpu);
6324 pCtx->rip = uNewRip;
6325 break;
6326 }
6327
6328 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6329 }
6330
6331 pCtx->eflags.Bits.u1RF = 0;
6332
6333#ifndef IEM_WITH_CODE_TLB
6334 /* Flush the prefetch buffer. */
6335 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6336#endif
6337
6338 return VINF_SUCCESS;
6339}
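/*
 * Worked example for the relative jump arithmetic above (values are
 * illustrative): a two byte "jmp short" (EB xx) at EIP=0x00001000 with
 * offNextInstr=-0x20 in 32-bit mode gives uNewEip = 0x1000 + (-0x20) + 2
 * = 0x0FE2, which is then checked against CS.u32Limit before being
 * committed to RIP.  The instruction length is added because the offset
 * is relative to the start of the next instruction.
 */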
6340
6341
6342/**
6343 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6344 *
6345 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6346 * segment limit.
6347 *
6348 * @returns Strict VBox status code.
6349 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6350 * @param offNextInstr The offset of the next instruction.
6351 */
6352IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6353{
6354 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6355 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6356
6357 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6358 if ( uNewIp > pCtx->cs.u32Limit
6359 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6360 return iemRaiseGeneralProtectionFault0(pVCpu);
6361 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6362 pCtx->rip = uNewIp;
6363 pCtx->eflags.Bits.u1RF = 0;
6364
6365#ifndef IEM_WITH_CODE_TLB
6366 /* Flush the prefetch buffer. */
6367 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6368#endif
6369
6370 return VINF_SUCCESS;
6371}
6372
6373
6374/**
6375 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6376 *
6377 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6378 * segment limit.
6379 *
6380 * @returns Strict VBox status code.
6381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6382 * @param offNextInstr The offset of the next instruction.
6383 */
6384IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6385{
6386 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6387 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6388
6389 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6390 {
6391 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6392
6393 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6394 if (uNewEip > pCtx->cs.u32Limit)
6395 return iemRaiseGeneralProtectionFault0(pVCpu);
6396 pCtx->rip = uNewEip;
6397 }
6398 else
6399 {
6400 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6401
6402 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6403 if (!IEM_IS_CANONICAL(uNewRip))
6404 return iemRaiseGeneralProtectionFault0(pVCpu);
6405 pCtx->rip = uNewRip;
6406 }
6407 pCtx->eflags.Bits.u1RF = 0;
6408
6409#ifndef IEM_WITH_CODE_TLB
6410 /* Flush the prefetch buffer. */
6411 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6412#endif
6413
6414 return VINF_SUCCESS;
6415}
6416
6417
6418/**
6419 * Performs a near jump to the specified address.
6420 *
6421 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6422 * segment limit.
6423 *
 * @returns Strict VBox status code.
6424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6425 * @param uNewRip The new RIP value.
6426 */
6427IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6428{
6429 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6430 switch (pVCpu->iem.s.enmEffOpSize)
6431 {
6432 case IEMMODE_16BIT:
6433 {
6434 Assert(uNewRip <= UINT16_MAX);
6435 if ( uNewRip > pCtx->cs.u32Limit
6436 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6437 return iemRaiseGeneralProtectionFault0(pVCpu);
6438 /** @todo Test 16-bit jump in 64-bit mode. */
6439 pCtx->rip = uNewRip;
6440 break;
6441 }
6442
6443 case IEMMODE_32BIT:
6444 {
6445 Assert(uNewRip <= UINT32_MAX);
6446 Assert(pCtx->rip <= UINT32_MAX);
6447 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6448
6449 if (uNewRip > pCtx->cs.u32Limit)
6450 return iemRaiseGeneralProtectionFault0(pVCpu);
6451 pCtx->rip = uNewRip;
6452 break;
6453 }
6454
6455 case IEMMODE_64BIT:
6456 {
6457 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6458
6459 if (!IEM_IS_CANONICAL(uNewRip))
6460 return iemRaiseGeneralProtectionFault0(pVCpu);
6461 pCtx->rip = uNewRip;
6462 break;
6463 }
6464
6465 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6466 }
6467
6468 pCtx->eflags.Bits.u1RF = 0;
6469
6470#ifndef IEM_WITH_CODE_TLB
6471 /* Flush the prefetch buffer. */
6472 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6473#endif
6474
6475 return VINF_SUCCESS;
6476}
6477
6478
6479/**
6480 * Gets the address of the top of the stack.
6481 *
6482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6483 * @param pCtx The CPU context from which SP/ESP/RSP should be
6484 * read.
6485 */
6486DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6487{
6488 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6489 return pCtx->rsp;
6490 if (pCtx->ss.Attr.n.u1DefBig)
6491 return pCtx->esp;
6492 return pCtx->sp;
6493}
6494
6495
6496/**
6497 * Updates the RIP/EIP/IP to point to the next instruction.
6498 *
6499 * This function leaves the EFLAGS.RF flag alone.
6500 *
6501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6502 * @param cbInstr The number of bytes to add.
6503 */
6504IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6505{
6506 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6507 switch (pVCpu->iem.s.enmCpuMode)
6508 {
6509 case IEMMODE_16BIT:
6510 Assert(pCtx->rip <= UINT16_MAX);
6511 pCtx->eip += cbInstr;
6512 pCtx->eip &= UINT32_C(0xffff);
6513 break;
6514
6515 case IEMMODE_32BIT:
6516 pCtx->eip += cbInstr;
6517 Assert(pCtx->rip <= UINT32_MAX);
6518 break;
6519
6520 case IEMMODE_64BIT:
6521 pCtx->rip += cbInstr;
6522 break;
6523 default: AssertFailed();
6524 }
6525}
6526
6527
6528#if 0
6529/**
6530 * Updates the RIP/EIP/IP to point to the next instruction.
6531 *
6532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6533 */
6534IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6535{
6536 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6537}
6538#endif
6539
6540
6541
6542/**
6543 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6544 *
6545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6546 * @param cbInstr The number of bytes to add.
6547 */
6548IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6549{
6550 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6551
6552 pCtx->eflags.Bits.u1RF = 0;
6553
6554 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6555#if ARCH_BITS >= 64
6556 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6557 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6558 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6559#else
6560 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6561 pCtx->rip += cbInstr;
6562 else
6563 pCtx->eip += cbInstr;
6564#endif
6565}
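/*
 * Note on the mask table above: IEMMODE_16BIT and IEMMODE_32BIT share the
 * 32-bit mask, so the instruction pointer advances as a 32-bit EIP in both
 * cases, while IEMMODE_64BIT keeps the full 64-bit RIP.  Illustrative
 * example: in 32-bit mode EIP=0xFFFFFFFE plus cbInstr=3 wraps to
 * 0x00000001 instead of carrying into the upper half of RIP.
 */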
6566
6567
6568/**
6569 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6570 *
6571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6572 */
6573IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6574{
6575 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6576}
6577
6578
6579/**
6580 * Adds to the stack pointer.
6581 *
6582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6583 * @param pCtx The CPU context in which SP/ESP/RSP should be
6584 * updated.
6585 * @param cbToAdd The number of bytes to add (8-bit!).
6586 */
6587DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6588{
6589 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6590 pCtx->rsp += cbToAdd;
6591 else if (pCtx->ss.Attr.n.u1DefBig)
6592 pCtx->esp += cbToAdd;
6593 else
6594 pCtx->sp += cbToAdd;
6595}
6596
6597
6598/**
6599 * Subtracts from the stack pointer.
6600 *
6601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6602 * @param pCtx The CPU context in which SP/ESP/RSP should be
6603 * updated.
6604 * @param cbToSub The number of bytes to subtract (8-bit!).
6605 */
6606DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6607{
6608 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6609 pCtx->rsp -= cbToSub;
6610 else if (pCtx->ss.Attr.n.u1DefBig)
6611 pCtx->esp -= cbToSub;
6612 else
6613 pCtx->sp -= cbToSub;
6614}
6615
6616
6617/**
6618 * Adds to the temporary stack pointer.
6619 *
6620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6621 * @param pCtx Where to get the current stack mode.
6622 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6623 * @param cbToAdd The number of bytes to add (16-bit).
6624 */
6625DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6626{
6627 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6628 pTmpRsp->u += cbToAdd;
6629 else if (pCtx->ss.Attr.n.u1DefBig)
6630 pTmpRsp->DWords.dw0 += cbToAdd;
6631 else
6632 pTmpRsp->Words.w0 += cbToAdd;
6633}
6634
6635
6636/**
6637 * Subtracts from the temporary stack pointer.
6638 *
6639 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6640 * @param pCtx Where to get the current stack mode.
6641 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6642 * @param cbToSub The number of bytes to subtract.
6643 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6644 * expecting that.
6645 */
6646DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6647{
6648 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6649 pTmpRsp->u -= cbToSub;
6650 else if (pCtx->ss.Attr.n.u1DefBig)
6651 pTmpRsp->DWords.dw0 -= cbToSub;
6652 else
6653 pTmpRsp->Words.w0 -= cbToSub;
6654}
6655
6656
6657/**
6658 * Calculates the effective stack address for a push of the specified size as
6659 * well as the new RSP value (upper bits may be masked).
6660 *
6661 * @returns Effective stack address for the push.
6662 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6663 * @param pCtx Where to get the current stack mode.
6664 * @param cbItem The size of the stack item to push.
6665 * @param puNewRsp Where to return the new RSP value.
6666 */
6667DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6668{
6669 RTUINT64U uTmpRsp;
6670 RTGCPTR GCPtrTop;
6671 uTmpRsp.u = pCtx->rsp;
6672
6673 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6674 GCPtrTop = uTmpRsp.u -= cbItem;
6675 else if (pCtx->ss.Attr.n.u1DefBig)
6676 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6677 else
6678 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6679 *puNewRsp = uTmpRsp.u;
6680 return GCPtrTop;
6681}
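/*
 * Worked example (illustrative values): pushing an 8 byte item in 64-bit
 * mode with RSP=0x7000 returns GCPtrTop=0x6FF8 and sets *puNewRsp=0x6FF8.
 * With a 16-bit stack segment (SS.Attr D/B clear) and SP=0x0004, the same
 * push wraps only the low word: GCPtrTop=0xFFFC, while the upper bits of
 * the value stored in *puNewRsp are left untouched.
 */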
6682
6683
6684/**
6685 * Gets the current stack pointer and calculates the value after a pop of the
6686 * specified size.
6687 *
6688 * @returns Current stack pointer.
6689 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6690 * @param pCtx Where to get the current stack mode.
6691 * @param cbItem The size of the stack item to pop.
6692 * @param puNewRsp Where to return the new RSP value.
6693 */
6694DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6695{
6696 RTUINT64U uTmpRsp;
6697 RTGCPTR GCPtrTop;
6698 uTmpRsp.u = pCtx->rsp;
6699
6700 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6701 {
6702 GCPtrTop = uTmpRsp.u;
6703 uTmpRsp.u += cbItem;
6704 }
6705 else if (pCtx->ss.Attr.n.u1DefBig)
6706 {
6707 GCPtrTop = uTmpRsp.DWords.dw0;
6708 uTmpRsp.DWords.dw0 += cbItem;
6709 }
6710 else
6711 {
6712 GCPtrTop = uTmpRsp.Words.w0;
6713 uTmpRsp.Words.w0 += cbItem;
6714 }
6715 *puNewRsp = uTmpRsp.u;
6716 return GCPtrTop;
6717}
6718
6719
6720/**
6721 * Calculates the effective stack address for a push of the specified size as
6722 * well as the new temporary RSP value (upper bits may be masked).
6723 *
6724 * @returns Effective stack address for the push.
6725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6726 * @param pCtx Where to get the current stack mode.
6727 * @param pTmpRsp The temporary stack pointer. This is updated.
6728 * @param cbItem The size of the stack item to push.
6729 */
6730DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6731{
6732 RTGCPTR GCPtrTop;
6733
6734 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6735 GCPtrTop = pTmpRsp->u -= cbItem;
6736 else if (pCtx->ss.Attr.n.u1DefBig)
6737 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6738 else
6739 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6740 return GCPtrTop;
6741}
6742
6743
6744/**
6745 * Gets the effective stack address for a pop of the specified size and
6746 * calculates and updates the temporary RSP.
6747 *
6748 * @returns Current stack pointer.
6749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6750 * @param pCtx Where to get the current stack mode.
6751 * @param pTmpRsp The temporary stack pointer. This is updated.
6752 * @param cbItem The size of the stack item to pop.
6753 */
6754DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6755{
6756 RTGCPTR GCPtrTop;
6757 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6758 {
6759 GCPtrTop = pTmpRsp->u;
6760 pTmpRsp->u += cbItem;
6761 }
6762 else if (pCtx->ss.Attr.n.u1DefBig)
6763 {
6764 GCPtrTop = pTmpRsp->DWords.dw0;
6765 pTmpRsp->DWords.dw0 += cbItem;
6766 }
6767 else
6768 {
6769 GCPtrTop = pTmpRsp->Words.w0;
6770 pTmpRsp->Words.w0 += cbItem;
6771 }
6772 return GCPtrTop;
6773}
6774
6775/** @} */
6776
6777
6778/** @name FPU access and helpers.
6779 *
6780 * @{
6781 */
6782
6783
6784/**
6785 * Hook for preparing to use the host FPU.
6786 *
6787 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6788 *
6789 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6790 */
6791DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6792{
6793#ifdef IN_RING3
6794 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6795#else
6796 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6797#endif
6798}
6799
6800
6801/**
6802 * Hook for preparing to use the host FPU for SSE.
6803 *
6804 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6805 *
6806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6807 */
6808DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6809{
6810 iemFpuPrepareUsage(pVCpu);
6811}
6812
6813
6814/**
6815 * Hook for preparing to use the host FPU for AVX.
6816 *
6817 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6818 *
6819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6820 */
6821DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6822{
6823 iemFpuPrepareUsage(pVCpu);
6824}
6825
6826
6827/**
6828 * Hook for actualizing the guest FPU state before the interpreter reads it.
6829 *
6830 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6831 *
6832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6833 */
6834DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6835{
6836#ifdef IN_RING3
6837 NOREF(pVCpu);
6838#else
6839 CPUMRZFpuStateActualizeForRead(pVCpu);
6840#endif
6841}
6842
6843
6844/**
6845 * Hook for actualizing the guest FPU state before the interpreter changes it.
6846 *
6847 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6848 *
6849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6850 */
6851DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6852{
6853#ifdef IN_RING3
6854 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6855#else
6856 CPUMRZFpuStateActualizeForChange(pVCpu);
6857#endif
6858}
6859
6860
6861/**
6862 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6863 * only.
6864 *
6865 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6866 *
6867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6868 */
6869DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6870{
6871#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6872 NOREF(pVCpu);
6873#else
6874 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6875#endif
6876}
6877
6878
6879/**
6880 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6881 * read+write.
6882 *
6883 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6884 *
6885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6886 */
6887DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6888{
6889#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6890 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6891#else
6892 CPUMRZFpuStateActualizeForChange(pVCpu);
6893#endif
6894}
6895
6896
6897/**
6898 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6899 * only.
6900 *
6901 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6902 *
6903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6904 */
6905DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6906{
6907#ifdef IN_RING3
6908 NOREF(pVCpu);
6909#else
6910 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6911#endif
6912}
6913
6914
6915/**
6916 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6917 * read+write.
6918 *
6919 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6920 *
6921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6922 */
6923DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
6924{
6925#ifdef IN_RING3
6926 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6927#else
6928 CPUMRZFpuStateActualizeForChange(pVCpu);
6929#endif
6930}
6931
6932
6933/**
6934 * Stores a QNaN value into a FPU register.
6935 *
6936 * @param pReg Pointer to the register.
6937 */
6938DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6939{
6940 pReg->au32[0] = UINT32_C(0x00000000);
6941 pReg->au32[1] = UINT32_C(0xc0000000);
6942 pReg->au16[4] = UINT16_C(0xffff);
6943}
6944
6945
6946/**
6947 * Updates the FOP, FPU.CS and FPUIP registers.
6948 *
6949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6950 * @param pCtx The CPU context.
6951 * @param pFpuCtx The FPU context.
6952 */
6953DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6954{
6955 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6956 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6957 /** @todo x87.CS and FPUIP need to be kept separately. */
6958 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6959 {
6960 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
6961 * happens in real mode here based on the fnsave and fnstenv images. */
6962 pFpuCtx->CS = 0;
6963 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6964 }
6965 else
6966 {
6967 pFpuCtx->CS = pCtx->cs.Sel;
6968 pFpuCtx->FPUIP = pCtx->rip;
6969 }
6970}
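/*
 * Worked example for the real/V86-mode branch above (values are
 * illustrative): with CS=0xF000 and EIP=0x1234 the saved FPUIP becomes
 * 0x1234 | (0xF000 << 4) = 0xF1234, i.e. the real-mode linear address of
 * the instruction as it would appear in an FNSAVE/FNSTENV image, while
 * the CS field is stored as zero.
 */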
6971
6972
6973/**
6974 * Updates the x87.DS and FPUDP registers.
6975 *
6976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6977 * @param pCtx The CPU context.
6978 * @param pFpuCtx The FPU context.
6979 * @param iEffSeg The effective segment register.
6980 * @param GCPtrEff The effective address relative to @a iEffSeg.
6981 */
6982DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6983{
6984 RTSEL sel;
6985 switch (iEffSeg)
6986 {
6987 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6988 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6989 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6990 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6991 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6992 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6993 default:
6994 AssertMsgFailed(("%d\n", iEffSeg));
6995 sel = pCtx->ds.Sel;
6996 }
6997 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6998 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6999 {
7000 pFpuCtx->DS = 0;
7001 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7002 }
7003 else
7004 {
7005 pFpuCtx->DS = sel;
7006 pFpuCtx->FPUDP = GCPtrEff;
7007 }
7008}
7009
7010
7011/**
7012 * Rotates the stack registers in the push direction.
7013 *
7014 * @param pFpuCtx The FPU context.
7015 * @remarks This is a complete waste of time, but fxsave stores the registers in
7016 * stack order.
7017 */
7018DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7019{
7020 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7021 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7022 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7023 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7024 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7025 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7026 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7027 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7028 pFpuCtx->aRegs[0].r80 = r80Tmp;
7029}
7030
7031
7032/**
7033 * Rotates the stack registers in the pop direction.
7034 *
7035 * @param pFpuCtx The FPU context.
7036 * @remarks This is a complete waste of time, but fxsave stores the registers in
7037 * stack order.
7038 */
7039DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7040{
7041 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7042 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7043 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7044 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7045 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7046 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7047 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7048 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7049 pFpuCtx->aRegs[7].r80 = r80Tmp;
7050}
7051
7052
7053/**
7054 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7055 * exception prevents it.
7056 *
7057 * @param pResult The FPU operation result to push.
7058 * @param pFpuCtx The FPU context.
7059 */
7060IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7061{
7062 /* Update FSW and bail if there are pending exceptions afterwards. */
7063 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7064 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7065 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7066 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7067 {
7068 pFpuCtx->FSW = fFsw;
7069 return;
7070 }
7071
7072 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7073 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7074 {
7075 /* All is fine, push the actual value. */
7076 pFpuCtx->FTW |= RT_BIT(iNewTop);
7077 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7078 }
7079 else if (pFpuCtx->FCW & X86_FCW_IM)
7080 {
7081 /* Masked stack overflow, push QNaN. */
7082 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7083 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7084 }
7085 else
7086 {
7087 /* Raise stack overflow, don't push anything. */
7088 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7089 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7090 return;
7091 }
7092
7093 fFsw &= ~X86_FSW_TOP_MASK;
7094 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7095 pFpuCtx->FSW = fFsw;
7096
7097 iemFpuRotateStackPush(pFpuCtx);
7098}
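/*
 * Note on the TOP arithmetic above: the x87 register stack grows downwards,
 * so a push decrements TOP modulo 8, and adding 7 before masking with
 * X86_FSW_TOP_SMASK is the same as subtracting 1 modulo 8.  Illustrative
 * example: with TOP=0 the new top is (0 + 7) & 7 = 7; the value is written
 * to aRegs[7] and iemFpuRotateStackPush then re-shuffles the registers back
 * into ST(n) order.
 */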
7099
7100
7101/**
7102 * Stores a result in a FPU register and updates the FSW and FTW.
7103 *
7104 * @param pFpuCtx The FPU context.
7105 * @param pResult The result to store.
7106 * @param iStReg Which FPU register to store it in.
7107 */
7108IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7109{
7110 Assert(iStReg < 8);
7111 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7112 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7113 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7114 pFpuCtx->FTW |= RT_BIT(iReg);
7115 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7116}
7117
7118
7119/**
7120 * Only updates the FPU status word (FSW) with the result of the current
7121 * instruction.
7122 *
7123 * @param pFpuCtx The FPU context.
7124 * @param u16FSW The FSW output of the current instruction.
7125 */
7126IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7127{
7128 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7129 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7130}
7131
7132
7133/**
7134 * Pops one item off the FPU stack if no pending exception prevents it.
7135 *
7136 * @param pFpuCtx The FPU context.
7137 */
7138IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7139{
7140 /* Check pending exceptions. */
7141 uint16_t uFSW = pFpuCtx->FSW;
7142 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7143 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7144 return;
7145
7146 /* TOP++ (a pop increments TOP, modulo 8). */
7147 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7148 uFSW &= ~X86_FSW_TOP_MASK;
7149 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7150 pFpuCtx->FSW = uFSW;
7151
7152 /* Mark the previous ST0 as empty. */
7153 iOldTop >>= X86_FSW_TOP_SHIFT;
7154 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7155
7156 /* Rotate the registers. */
7157 iemFpuRotateStackPop(pFpuCtx);
7158}
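/*
 * Note on the "+ 9" above: the sum is masked back into the 3-bit TOP field,
 * so conceptually this is TOP = (TOP + 1) & 7, i.e. a pop increments the
 * top-of-stack pointer.  Illustrative example: with TOP=7 the new value is
 * (7 + 9) & 7 = 0, wrapping around the eight-register stack.
 */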
7159
7160
7161/**
7162 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7163 *
7164 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7165 * @param pResult The FPU operation result to push.
7166 */
7167IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7168{
7169 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7170 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7171 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7172 iemFpuMaybePushResult(pResult, pFpuCtx);
7173}
7174
7175
7176/**
7177 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7178 * and sets FPUDP and FPUDS.
7179 *
7180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7181 * @param pResult The FPU operation result to push.
7182 * @param iEffSeg The effective segment register.
7183 * @param GCPtrEff The effective address relative to @a iEffSeg.
7184 */
7185IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7186{
7187 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7188 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7189 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7190 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7191 iemFpuMaybePushResult(pResult, pFpuCtx);
7192}
7193
7194
7195/**
7196 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7197 * unless a pending exception prevents it.
7198 *
7199 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7200 * @param pResult The FPU operation result to store and push.
7201 */
7202IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7203{
7204 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7205 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7206 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7207
7208 /* Update FSW and bail if there are pending exceptions afterwards. */
7209 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7210 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7211 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7212 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7213 {
7214 pFpuCtx->FSW = fFsw;
7215 return;
7216 }
7217
7218 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7219 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7220 {
7221 /* All is fine, push the actual value. */
7222 pFpuCtx->FTW |= RT_BIT(iNewTop);
7223 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7224 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7225 }
7226 else if (pFpuCtx->FCW & X86_FCW_IM)
7227 {
7228 /* Masked stack overflow, push QNaN. */
7229 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7230 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7231 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7232 }
7233 else
7234 {
7235 /* Raise stack overflow, don't push anything. */
7236 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7237 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7238 return;
7239 }
7240
7241 fFsw &= ~X86_FSW_TOP_MASK;
7242 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7243 pFpuCtx->FSW = fFsw;
7244
7245 iemFpuRotateStackPush(pFpuCtx);
7246}
7247
7248
7249/**
7250 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7251 * FOP.
7252 *
7253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7254 * @param pResult The result to store.
7255 * @param iStReg Which FPU register to store it in.
7256 */
7257IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7258{
7259 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7260 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7261 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7262 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7263}
7264
7265
7266/**
7267 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7268 * FOP, and then pops the stack.
7269 *
7270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7271 * @param pResult The result to store.
7272 * @param iStReg Which FPU register to store it in.
7273 */
7274IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7275{
7276 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7277 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7278 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7279 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7280 iemFpuMaybePopOne(pFpuCtx);
7281}
7282
7283
7284/**
7285 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7286 * FPUDP, and FPUDS.
7287 *
7288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7289 * @param pResult The result to store.
7290 * @param iStReg Which FPU register to store it in.
7291 * @param iEffSeg The effective memory operand selector register.
7292 * @param GCPtrEff The effective memory operand offset.
7293 */
7294IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7295 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7296{
7297 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7298 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7299 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7300 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7301 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7302}
7303
7304
7305/**
7306 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7307 * FPUDP, and FPUDS, and then pops the stack.
7308 *
7309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7310 * @param pResult The result to store.
7311 * @param iStReg Which FPU register to store it in.
7312 * @param iEffSeg The effective memory operand selector register.
7313 * @param GCPtrEff The effective memory operand offset.
7314 */
7315IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7316 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7317{
7318 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7319 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7320 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7321 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7322 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7323 iemFpuMaybePopOne(pFpuCtx);
7324}
7325
7326
7327/**
7328 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7329 *
7330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7331 */
7332IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7333{
7334 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7335 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7336 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7337}
7338
7339
7340/**
7341 * Marks the specified stack register as free (for FFREE).
7342 *
7343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7344 * @param iStReg The register to free.
7345 */
7346IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7347{
7348 Assert(iStReg < 8);
7349 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7350 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7351 pFpuCtx->FTW &= ~RT_BIT(iReg);
7352}
7353
7354
7355/**
7356 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7357 *
7358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7359 */
7360IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7361{
7362 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7363 uint16_t uFsw = pFpuCtx->FSW;
7364 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7365 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7366 uFsw &= ~X86_FSW_TOP_MASK;
7367 uFsw |= uTop;
7368 pFpuCtx->FSW = uFsw;
7369}
7370
7371
7372/**
7373 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7374 *
7375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7376 */
7377IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7378{
7379 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7380 uint16_t uFsw = pFpuCtx->FSW;
7381 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7382 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7383 uFsw &= ~X86_FSW_TOP_MASK;
7384 uFsw |= uTop;
7385 pFpuCtx->FSW = uFsw;
7386}
7387
7388
7389/**
7390 * Updates the FSW, FOP, FPUIP, and FPUCS.
7391 *
7392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7393 * @param u16FSW The FSW from the current instruction.
7394 */
7395IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7396{
7397 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7398 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7399 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7400 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7401}
7402
7403
7404/**
7405 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7406 *
7407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7408 * @param u16FSW The FSW from the current instruction.
7409 */
7410IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7411{
7412 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7413 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7414 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7415 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7416 iemFpuMaybePopOne(pFpuCtx);
7417}
7418
7419
7420/**
7421 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7422 *
7423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7424 * @param u16FSW The FSW from the current instruction.
7425 * @param iEffSeg The effective memory operand selector register.
7426 * @param GCPtrEff The effective memory operand offset.
7427 */
7428IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7429{
7430 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7431 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7432 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7433 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7434 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7435}
7436
7437
7438/**
7439 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7440 *
7441 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7442 * @param u16FSW The FSW from the current instruction.
7443 */
7444IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7445{
7446 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7447 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7448 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7449 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7450 iemFpuMaybePopOne(pFpuCtx);
7451 iemFpuMaybePopOne(pFpuCtx);
7452}
7453
7454
7455/**
7456 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7457 *
7458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7459 * @param u16FSW The FSW from the current instruction.
7460 * @param iEffSeg The effective memory operand selector register.
7461 * @param GCPtrEff The effective memory operand offset.
7462 */
7463IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7464{
7465 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7466 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7467 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7468 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7469 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7470 iemFpuMaybePopOne(pFpuCtx);
7471}
7472
7473
7474/**
7475 * Worker routine for raising an FPU stack underflow exception.
7476 *
7477 * @param pFpuCtx The FPU context.
7478 * @param iStReg The stack register being accessed.
7479 */
7480IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7481{
7482 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7483 if (pFpuCtx->FCW & X86_FCW_IM)
7484 {
7485 /* Masked underflow. */
7486 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7487 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7488 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7489 if (iStReg != UINT8_MAX)
7490 {
7491 pFpuCtx->FTW |= RT_BIT(iReg);
7492 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7493 }
7494 }
7495 else
7496 {
7497 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7498 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7499 }
7500}
7501
7502
7503/**
7504 * Raises a FPU stack underflow exception.
7505 *
7506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7507 * @param iStReg The destination register that should be loaded
7508 * with QNaN if \#IS is not masked. Specify
7509 * UINT8_MAX if none (like for fcom).
7510 */
7511DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7512{
7513 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7514 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7515 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7516 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7517}
7518
7519
7520DECL_NO_INLINE(IEM_STATIC, void)
7521iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7522{
7523 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7524 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7525 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7526 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7527 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7528}
7529
7530
7531DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7532{
7533 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7534 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7535 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7536 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7537 iemFpuMaybePopOne(pFpuCtx);
7538}
7539
7540
7541DECL_NO_INLINE(IEM_STATIC, void)
7542iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7543{
7544 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7545 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7546 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7547 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7548 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7549 iemFpuMaybePopOne(pFpuCtx);
7550}
7551
7552
7553DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7554{
7555 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7556 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7557 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7558 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7559 iemFpuMaybePopOne(pFpuCtx);
7560 iemFpuMaybePopOne(pFpuCtx);
7561}
7562
7563
7564DECL_NO_INLINE(IEM_STATIC, void)
7565iemFpuStackPushUnderflow(PVMCPU pVCpu)
7566{
7567 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7568 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7569 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7570
7571 if (pFpuCtx->FCW & X86_FCW_IM)
7572 {
7573 /* Masked underflow - Push QNaN. */
7574 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7575 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7576 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7577 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7578 pFpuCtx->FTW |= RT_BIT(iNewTop);
7579 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7580 iemFpuRotateStackPush(pFpuCtx);
7581 }
7582 else
7583 {
7584 /* Exception pending - don't change TOP or the register stack. */
7585 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7586 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7587 }
7588}
7589
7590
7591DECL_NO_INLINE(IEM_STATIC, void)
7592iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7593{
7594 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7595 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7596 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7597
7598 if (pFpuCtx->FCW & X86_FCW_IM)
7599 {
7601 /* Masked underflow - Push QNaN. */
7601 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7602 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7603 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7604 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7605 pFpuCtx->FTW |= RT_BIT(iNewTop);
7606 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7607 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7608 iemFpuRotateStackPush(pFpuCtx);
7609 }
7610 else
7611 {
7612 /* Exception pending - don't change TOP or the register stack. */
7613 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7614 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7615 }
7616}
7617
7618
7619/**
7620 * Worker routine for raising an FPU stack overflow exception on a push.
7621 *
7622 * @param pFpuCtx The FPU context.
7623 */
7624IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7625{
7626 if (pFpuCtx->FCW & X86_FCW_IM)
7627 {
7628 /* Masked overflow. */
7629 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7630 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7631 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7632 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7633 pFpuCtx->FTW |= RT_BIT(iNewTop);
7634 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7635 iemFpuRotateStackPush(pFpuCtx);
7636 }
7637 else
7638 {
7639 /* Exception pending - don't change TOP or the register stack. */
7640 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7641 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7642 }
7643}
7644
7645
7646/**
7647 * Raises a FPU stack overflow exception on a push.
7648 *
7649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7650 */
7651DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7652{
7653 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7654 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7655 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7656 iemFpuStackPushOverflowOnly(pFpuCtx);
7657}
7658
7659
7660/**
7661 * Raises a FPU stack overflow exception on a push with a memory operand.
7662 *
7663 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7664 * @param iEffSeg The effective memory operand selector register.
7665 * @param GCPtrEff The effective memory operand offset.
7666 */
7667DECL_NO_INLINE(IEM_STATIC, void)
7668iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7669{
7670 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7671 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7672 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7673 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7674 iemFpuStackPushOverflowOnly(pFpuCtx);
7675}
7676
7677
7678IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7679{
7680 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7681 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7682 if (pFpuCtx->FTW & RT_BIT(iReg))
7683 return VINF_SUCCESS;
7684 return VERR_NOT_FOUND;
7685}
7686
7687
7688IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7689{
7690 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7691 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7692 if (pFpuCtx->FTW & RT_BIT(iReg))
7693 {
7694 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7695 return VINF_SUCCESS;
7696 }
7697 return VERR_NOT_FOUND;
7698}
7699
7700
7701IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7702 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7703{
7704 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7705 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7706 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7707 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7708 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7709 {
7710 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7711 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7712 return VINF_SUCCESS;
7713 }
7714 return VERR_NOT_FOUND;
7715}
7716
7717
7718IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7719{
7720 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7721 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7722 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7723 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7724 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7725 {
7726 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7727 return VINF_SUCCESS;
7728 }
7729 return VERR_NOT_FOUND;
7730}
7731
7732
7733/**
7734 * Updates the FPU exception status after FCW is changed.
7735 *
7736 * @param pFpuCtx The FPU context.
7737 */
7738IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7739{
7740 uint16_t u16Fsw = pFpuCtx->FSW;
7741 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7742 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7743 else
7744 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7745 pFpuCtx->FSW = u16Fsw;
7746}
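/*
 * Illustrative example: if FSW.ZE is set (a divide-by-zero has been recorded)
 * and the guest then clears FCW.ZM, the exception becomes unmasked, the test
 * above is true and ES plus B are set so a later waiting FPU instruction can
 * deliver the pending exception; re-masking it clears ES and B again.
 */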
7747
7748
7749/**
7750 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7751 *
7752 * @returns The full FTW.
7753 * @param pFpuCtx The FPU context.
7754 */
7755IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7756{
7757 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7758 uint16_t u16Ftw = 0;
7759 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7760 for (unsigned iSt = 0; iSt < 8; iSt++)
7761 {
7762 unsigned const iReg = (iSt + iTop) & 7;
7763 if (!(u8Ftw & RT_BIT(iReg)))
7764 u16Ftw |= 3 << (iReg * 2); /* empty */
7765 else
7766 {
7767 uint16_t uTag;
7768 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7769 if (pr80Reg->s.uExponent == 0x7fff)
7770 uTag = 2; /* Exponent is all 1's => Special. */
7771 else if (pr80Reg->s.uExponent == 0x0000)
7772 {
7773 if (pr80Reg->s.u64Mantissa == 0x0000)
7774 uTag = 1; /* All bits are zero => Zero. */
7775 else
7776 uTag = 2; /* Must be special. */
7777 }
7778 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7779 uTag = 0; /* Valid. */
7780 else
7781 uTag = 2; /* Must be special. */
7782
7783 u16Ftw |= uTag << (iReg * 2);
7784 }
7785 }
7786
7787 return u16Ftw;
7788}
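/*
 * The two-bit tags produced above follow the FNSAVE/FNSTENV convention:
 * 00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal, ...),
 * 11 = empty.  Illustrative example: with TOP=6 and only ST(0) holding 1.0
 * (physical register 6), the full FTW is 0xCFFF, i.e. every field is 11b
 * (empty) except field 6, which is 00b (valid).
 */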
7789
7790
7791/**
7792 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7793 *
7794 * @returns The compressed FTW.
7795 * @param u16FullFtw The full FTW to convert.
7796 */
7797IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7798{
7799 uint8_t u8Ftw = 0;
7800 for (unsigned i = 0; i < 8; i++)
7801 {
7802 if ((u16FullFtw & 3) != 3 /*empty*/)
7803 u8Ftw |= RT_BIT(i);
7804 u16FullFtw >>= 2;
7805 }
7806
7807 return u8Ftw;
7808}
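/*
 * Illustrative example for the compression above: the full tag word 0xCFFF
 * (only physical register 6 in use) compresses to RT_BIT(6) = 0x40, matching
 * the one-bit-per-register abridged FTW format used by FXSAVE and by the
 * rest of this file.
 */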
7809
7810/** @} */
7811
7812
7813/** @name Memory access.
7814 *
7815 * @{
7816 */
7817
7818
7819/**
7820 * Updates the IEMCPU::cbWritten counter if applicable.
7821 *
7822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7823 * @param fAccess The access being accounted for.
7824 * @param cbMem The access size.
7825 */
7826DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7827{
7828 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7829 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7830 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7831}
7832
7833
7834/**
7835 * Checks if the given segment can be written to, raising the appropriate
7836 * exception if not.
7837 *
7838 * @returns VBox strict status code.
7839 *
7840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7841 * @param pHid Pointer to the hidden register.
7842 * @param iSegReg The register number.
7843 * @param pu64BaseAddr Where to return the base address to use for the
7844 * segment. (In 64-bit code it may differ from the
7845 * base in the hidden segment.)
7846 */
7847IEM_STATIC VBOXSTRICTRC
7848iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7849{
7850 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7851 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7852 else
7853 {
7854 if (!pHid->Attr.n.u1Present)
7855 {
7856 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7857 AssertRelease(uSel == 0);
7858 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7859 return iemRaiseGeneralProtectionFault0(pVCpu);
7860 }
7861
7862 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7863 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7864 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7865 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7866 *pu64BaseAddr = pHid->u64Base;
7867 }
7868 return VINF_SUCCESS;
7869}
7870
7871
7872/**
7873 * Checks if the given segment can be read from, raising the appropriate
7874 * exception if not.
7875 *
7876 * @returns VBox strict status code.
7877 *
7878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7879 * @param pHid Pointer to the hidden register.
7880 * @param iSegReg The register number.
7881 * @param pu64BaseAddr Where to return the base address to use for the
7882 * segment. (In 64-bit code it may differ from the
7883 * base in the hidden segment.)
7884 */
7885IEM_STATIC VBOXSTRICTRC
7886iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7887{
7888 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7889 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7890 else
7891 {
7892 if (!pHid->Attr.n.u1Present)
7893 {
7894 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7895 AssertRelease(uSel == 0);
7896 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7897 return iemRaiseGeneralProtectionFault0(pVCpu);
7898 }
7899
7900 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7901 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7902 *pu64BaseAddr = pHid->u64Base;
7903 }
7904 return VINF_SUCCESS;
7905}
7906
7907
7908/**
7909 * Applies the segment limit, base and attributes.
7910 *
7911 * This may raise a \#GP or \#SS.
7912 *
7913 * @returns VBox strict status code.
7914 *
7915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7916 * @param fAccess The kind of access which is being performed.
7917 * @param iSegReg The index of the segment register to apply.
7918 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7919 * TSS, ++).
7920 * @param cbMem The access size.
7921 * @param pGCPtrMem Pointer to the guest memory address to apply
7922 * segmentation to. Input and output parameter.
7923 */
7924IEM_STATIC VBOXSTRICTRC
7925iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7926{
7927 if (iSegReg == UINT8_MAX)
7928 return VINF_SUCCESS;
7929
7930 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7931 switch (pVCpu->iem.s.enmCpuMode)
7932 {
7933 case IEMMODE_16BIT:
7934 case IEMMODE_32BIT:
7935 {
7936 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7937 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7938
7939 if ( pSel->Attr.n.u1Present
7940 && !pSel->Attr.n.u1Unusable)
7941 {
7942 Assert(pSel->Attr.n.u1DescType);
7943 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7944 {
7945 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7946 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7947 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7948
7949 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7950 {
7951 /** @todo CPL check. */
7952 }
7953
7954 /*
7955 * There are two kinds of data selectors, normal and expand down.
7956 */
7957 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7958 {
7959 if ( GCPtrFirst32 > pSel->u32Limit
7960 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7961 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7962 }
7963 else
7964 {
7965 /*
7966 * The upper boundary is defined by the B bit, not the G bit!
7967 */
7968 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7969 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7970 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7971 }
7972 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7973 }
7974 else
7975 {
7976
7977 /*
7978                     * A code selector can usually be used to read through; writing is
7979 * only permitted in real and V8086 mode.
7980 */
7981 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7982 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7983 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7984 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7985 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7986
7987 if ( GCPtrFirst32 > pSel->u32Limit
7988 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7989 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7990
7991 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7992 {
7993 /** @todo CPL check. */
7994 }
7995
7996 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7997 }
7998 }
7999 else
8000 return iemRaiseGeneralProtectionFault0(pVCpu);
8001 return VINF_SUCCESS;
8002 }
8003
8004 case IEMMODE_64BIT:
8005 {
8006 RTGCPTR GCPtrMem = *pGCPtrMem;
8007 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8008 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8009
8010 Assert(cbMem >= 1);
8011 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8012 return VINF_SUCCESS;
8013 return iemRaiseGeneralProtectionFault0(pVCpu);
8014 }
8015
8016 default:
8017 AssertFailedReturn(VERR_IEM_IPE_7);
8018 }
8019}
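
/*
 * Illustrative sketch (not part of the build): how a caller might apply DS
 * segmentation to a 4 byte data read before translating the address.  The
 * GCPtrEff/GCPtrOffset variables and the error handling shown are assumptions
 * made purely for illustration.
 *
 *      RTGCPTR      GCPtrEff = GCPtrOffset;    // segment relative offset from the decoder
 *      VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_DATA_R, X86_SREG_DS,
 *                                                 sizeof(uint32_t), &GCPtrEff);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;                    // #GP or #SS was already raised by the helper
 *      // GCPtrEff is now a linear address, ready for iemMemPageTranslateAndCheckAccess.
 */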
8020
8021
8022/**
8023 * Translates a virtual address to a physical address and checks if we
8024 * can access the page as specified.
8025 *
8026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8027 * @param GCPtrMem The virtual address.
8028 * @param fAccess The intended access.
8029 * @param pGCPhysMem Where to return the physical address.
8030 */
8031IEM_STATIC VBOXSTRICTRC
8032iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8033{
8034 /** @todo Need a different PGM interface here. We're currently using
8035     *        generic / REM interfaces. This won't cut it for R0 & RC. */
8036 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8037 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8038 RTGCPHYS GCPhys;
8039 uint64_t fFlags;
8040 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8041 if (RT_FAILURE(rc))
8042 {
8043 /** @todo Check unassigned memory in unpaged mode. */
8044 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8045 *pGCPhysMem = NIL_RTGCPHYS;
8046 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8047 }
8048
8049    /* If the page is writable, user accessible and does not have the no-exec
8050       bit set, all access is allowed. Otherwise we'll have to check more carefully... */
8051 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8052 {
8053 /* Write to read only memory? */
8054 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8055 && !(fFlags & X86_PTE_RW)
8056 && ( (pVCpu->iem.s.uCpl == 3
8057 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8058 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8059 {
8060 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8061 *pGCPhysMem = NIL_RTGCPHYS;
8062 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8063 }
8064
8065 /* Kernel memory accessed by userland? */
8066 if ( !(fFlags & X86_PTE_US)
8067 && pVCpu->iem.s.uCpl == 3
8068 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8069 {
8070 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8071 *pGCPhysMem = NIL_RTGCPHYS;
8072 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8073 }
8074
8075 /* Executing non-executable memory? */
8076 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8077 && (fFlags & X86_PTE_PAE_NX)
8078 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8079 {
8080 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8081 *pGCPhysMem = NIL_RTGCPHYS;
8082 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8083 VERR_ACCESS_DENIED);
8084 }
8085 }
8086
8087 /*
8088 * Set the dirty / access flags.
8089     * ASSUMES this is set when the address is translated rather than on commit...
8090 */
8091 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8092 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8093 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8094 {
8095 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8096 AssertRC(rc2);
8097 }
8098
8099 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8100 *pGCPhysMem = GCPhys;
8101 return VINF_SUCCESS;
8102}
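
/*
 * Illustrative sketch (not part of the build): translating a linear address
 * for a data read.  GCPtrEff is an assumed local; on success the returned
 * physical address already includes the page offset bits.
 *
 *      RTGCPHYS     GCPhys;
 *      VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrEff,
 *                                                                IEM_ACCESS_DATA_R, &GCPhys);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;                    // #PF was already raised with the right error code
 */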
8103
8104
8105
8106/**
8107 * Maps a physical page.
8108 *
8109 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8111 * @param GCPhysMem The physical address.
8112 * @param fAccess The intended access.
8113 * @param ppvMem Where to return the mapping address.
8114 * @param pLock The PGM lock.
8115 */
8116IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8117{
8118#ifdef IEM_VERIFICATION_MODE_FULL
8119 /* Force the alternative path so we can ignore writes. */
8120 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8121 {
8122 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8123 {
8124 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8125 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8126 if (RT_FAILURE(rc2))
8127 pVCpu->iem.s.fProblematicMemory = true;
8128 }
8129 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8130 }
8131#endif
8132#ifdef IEM_LOG_MEMORY_WRITES
8133 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8134 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8135#endif
8136#ifdef IEM_VERIFICATION_MODE_MINIMAL
8137 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8138#endif
8139
8140    /** @todo This API may require some improvement later. A private deal with PGM
8141     *        regarding locking and unlocking needs to be struck. A couple of TLBs
8142 * living in PGM, but with publicly accessible inlined access methods
8143 * could perhaps be an even better solution. */
8144 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8145 GCPhysMem,
8146 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8147 pVCpu->iem.s.fBypassHandlers,
8148 ppvMem,
8149 pLock);
8150 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8151 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8152
8153#ifdef IEM_VERIFICATION_MODE_FULL
8154 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8155 pVCpu->iem.s.fProblematicMemory = true;
8156#endif
8157 return rc;
8158}
8159
8160
8161/**
8162 * Unmap a page previously mapped by iemMemPageMap.
8163 *
8164 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8165 * @param GCPhysMem The physical address.
8166 * @param fAccess The intended access.
8167 * @param pvMem What iemMemPageMap returned.
8168 * @param pLock The PGM lock.
8169 */
8170DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8171{
8172 NOREF(pVCpu);
8173 NOREF(GCPhysMem);
8174 NOREF(fAccess);
8175 NOREF(pvMem);
8176 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8177}
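
/*
 * Illustrative sketch (not part of the build): mapping a guest physical page
 * for reading, peeking at a single byte and releasing the PGM lock again.
 * The GCPhys and bPeek names are assumptions made purely for illustration.
 *
 *      PGMPAGEMAPLOCK Lock;
 *      void          *pvByte;
 *      int rc = iemMemPageMap(pVCpu, GCPhys, IEM_ACCESS_DATA_R, &pvByte, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          uint8_t bPeek = *(uint8_t const *)pvByte;   // pvByte corresponds to GCPhys
 *          iemMemPageUnmap(pVCpu, GCPhys, IEM_ACCESS_DATA_R, pvByte, &Lock);
 *          NOREF(bPeek);
 *      }
 */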
8178
8179
8180/**
8181 * Looks up a memory mapping entry.
8182 *
8183 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8185 * @param pvMem The memory address.
8186 * @param fAccess The access type and origin to match.
8187 */
8188DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8189{
8190 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8191 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8192 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8193 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8194 return 0;
8195 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8196 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8197 return 1;
8198 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8199 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8200 return 2;
8201 return VERR_NOT_FOUND;
8202}
8203
8204
8205/**
8206 * Finds a free memmap entry when using iNextMapping doesn't work.
8207 *
8208 * @returns Memory mapping index, 1024 on failure.
8209 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8210 */
8211IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8212{
8213 /*
8214 * The easy case.
8215 */
8216 if (pVCpu->iem.s.cActiveMappings == 0)
8217 {
8218 pVCpu->iem.s.iNextMapping = 1;
8219 return 0;
8220 }
8221
8222 /* There should be enough mappings for all instructions. */
8223 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8224
8225 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8226 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8227 return i;
8228
8229 AssertFailedReturn(1024);
8230}
8231
8232
8233/**
8234 * Commits a bounce buffer that needs writing back and unmaps it.
8235 *
8236 * @returns Strict VBox status code.
8237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8238 * @param iMemMap The index of the buffer to commit.
8239 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8240 * Always false in ring-3, obviously.
8241 */
8242IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8243{
8244 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8245 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8246#ifdef IN_RING3
8247 Assert(!fPostponeFail);
8248 RT_NOREF_PV(fPostponeFail);
8249#endif
8250
8251 /*
8252 * Do the writing.
8253 */
8254#ifndef IEM_VERIFICATION_MODE_MINIMAL
8255 PVM pVM = pVCpu->CTX_SUFF(pVM);
8256 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8257 && !IEM_VERIFICATION_ENABLED(pVCpu))
8258 {
8259 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8260 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8261 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8262 if (!pVCpu->iem.s.fBypassHandlers)
8263 {
8264 /*
8265 * Carefully and efficiently dealing with access handler return
8266             * codes makes this a little bloated.
8267 */
8268 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8269 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8270 pbBuf,
8271 cbFirst,
8272 PGMACCESSORIGIN_IEM);
8273 if (rcStrict == VINF_SUCCESS)
8274 {
8275 if (cbSecond)
8276 {
8277 rcStrict = PGMPhysWrite(pVM,
8278 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8279 pbBuf + cbFirst,
8280 cbSecond,
8281 PGMACCESSORIGIN_IEM);
8282 if (rcStrict == VINF_SUCCESS)
8283 { /* nothing */ }
8284 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8285 {
8286 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8287 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8288 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8289 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8290 }
8291# ifndef IN_RING3
8292 else if (fPostponeFail)
8293 {
8294 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8295 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8296 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8297 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8298 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8299 return iemSetPassUpStatus(pVCpu, rcStrict);
8300 }
8301# endif
8302 else
8303 {
8304 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8305 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8306 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8307 return rcStrict;
8308 }
8309 }
8310 }
8311 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8312 {
8313 if (!cbSecond)
8314 {
8315 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8316 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8317 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8318 }
8319 else
8320 {
8321 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8322 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8323 pbBuf + cbFirst,
8324 cbSecond,
8325 PGMACCESSORIGIN_IEM);
8326 if (rcStrict2 == VINF_SUCCESS)
8327 {
8328 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8329 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8330 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8331 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8332 }
8333 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8334 {
8335 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8336 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8337 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8338 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8339 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8340 }
8341# ifndef IN_RING3
8342 else if (fPostponeFail)
8343 {
8344 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8345 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8346 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8347 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8348 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8349 return iemSetPassUpStatus(pVCpu, rcStrict);
8350 }
8351# endif
8352 else
8353 {
8354 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8355 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8356 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8357 return rcStrict2;
8358 }
8359 }
8360 }
8361# ifndef IN_RING3
8362 else if (fPostponeFail)
8363 {
8364 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8365 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8366 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8367 if (!cbSecond)
8368 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8369 else
8370 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8371 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8372 return iemSetPassUpStatus(pVCpu, rcStrict);
8373 }
8374# endif
8375 else
8376 {
8377 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8378 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8379 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8380 return rcStrict;
8381 }
8382 }
8383 else
8384 {
8385 /*
8386 * No access handlers, much simpler.
8387 */
8388 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8389 if (RT_SUCCESS(rc))
8390 {
8391 if (cbSecond)
8392 {
8393 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8394 if (RT_SUCCESS(rc))
8395 { /* likely */ }
8396 else
8397 {
8398 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8399 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8400 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8401 return rc;
8402 }
8403 }
8404 }
8405 else
8406 {
8407 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8408 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8409 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8410 return rc;
8411 }
8412 }
8413 }
8414#endif
8415
8416#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8417 /*
8418 * Record the write(s).
8419 */
8420 if (!pVCpu->iem.s.fNoRem)
8421 {
8422 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8423 if (pEvtRec)
8424 {
8425 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8426 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8427 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8428 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8429 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8430 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8431 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8432 }
8433 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8434 {
8435 pEvtRec = iemVerifyAllocRecord(pVCpu);
8436 if (pEvtRec)
8437 {
8438 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8439 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8440 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8441 memcpy(pEvtRec->u.RamWrite.ab,
8442 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8443 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8444 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8445 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8446 }
8447 }
8448 }
8449#endif
8450#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8451 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8452 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8453 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8454 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8455 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8456 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8457
8458 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8459 g_cbIemWrote = cbWrote;
8460 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8461#endif
8462
8463 /*
8464 * Free the mapping entry.
8465 */
8466 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8467 Assert(pVCpu->iem.s.cActiveMappings != 0);
8468 pVCpu->iem.s.cActiveMappings--;
8469 return VINF_SUCCESS;
8470}
8471
8472
8473/**
8474 * iemMemMap worker that deals with a request crossing pages.
8475 */
8476IEM_STATIC VBOXSTRICTRC
8477iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8478{
8479 /*
8480 * Do the address translations.
8481 */
8482 RTGCPHYS GCPhysFirst;
8483 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8484 if (rcStrict != VINF_SUCCESS)
8485 return rcStrict;
8486
8487 RTGCPHYS GCPhysSecond;
8488 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8489 fAccess, &GCPhysSecond);
8490 if (rcStrict != VINF_SUCCESS)
8491 return rcStrict;
8492 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8493
8494 PVM pVM = pVCpu->CTX_SUFF(pVM);
8495#ifdef IEM_VERIFICATION_MODE_FULL
8496 /*
8497 * Detect problematic memory when verifying so we can select
8498 * the right execution engine. (TLB: Redo this.)
8499 */
8500 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8501 {
8502 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8503 if (RT_SUCCESS(rc2))
8504 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8505 if (RT_FAILURE(rc2))
8506 pVCpu->iem.s.fProblematicMemory = true;
8507 }
8508#endif
8509
8510
8511 /*
8512 * Read in the current memory content if it's a read, execute or partial
8513 * write access.
8514 */
8515 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8516 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8517 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8518
8519 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8520 {
8521 if (!pVCpu->iem.s.fBypassHandlers)
8522 {
8523 /*
8524 * Must carefully deal with access handler status codes here,
8525             * which makes the code a bit bloated.
8526 */
8527 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8528 if (rcStrict == VINF_SUCCESS)
8529 {
8530 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8531 if (rcStrict == VINF_SUCCESS)
8532 { /*likely */ }
8533 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8534 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8535 else
8536 {
8537                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8538 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8539 return rcStrict;
8540 }
8541 }
8542 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8543 {
8544 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8545 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8546 {
8547 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8548 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8549 }
8550 else
8551 {
8552                        Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8553                             GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8554 return rcStrict2;
8555 }
8556 }
8557 else
8558 {
8559                Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8560 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8561 return rcStrict;
8562 }
8563 }
8564 else
8565 {
8566 /*
8567             * No informational status codes here, much more straightforward.
8568 */
8569 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8570 if (RT_SUCCESS(rc))
8571 {
8572 Assert(rc == VINF_SUCCESS);
8573 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8574 if (RT_SUCCESS(rc))
8575 Assert(rc == VINF_SUCCESS);
8576 else
8577 {
8578                    Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8579 return rc;
8580 }
8581 }
8582 else
8583 {
8584                Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8585 return rc;
8586 }
8587 }
8588
8589#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8590 if ( !pVCpu->iem.s.fNoRem
8591 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8592 {
8593 /*
8594 * Record the reads.
8595 */
8596 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8597 if (pEvtRec)
8598 {
8599 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8600 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8601 pEvtRec->u.RamRead.cb = cbFirstPage;
8602 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8603 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8604 }
8605 pEvtRec = iemVerifyAllocRecord(pVCpu);
8606 if (pEvtRec)
8607 {
8608 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8609 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8610 pEvtRec->u.RamRead.cb = cbSecondPage;
8611 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8612 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8613 }
8614 }
8615#endif
8616 }
8617#ifdef VBOX_STRICT
8618 else
8619 memset(pbBuf, 0xcc, cbMem);
8620 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8621 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8622#endif
8623
8624 /*
8625 * Commit the bounce buffer entry.
8626 */
8627 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8628 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8629 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8630 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8631 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8632 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8633 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8634 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8635 pVCpu->iem.s.cActiveMappings++;
8636
8637 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8638 *ppvMem = pbBuf;
8639 return VINF_SUCCESS;
8640}
8641
8642
8643/**
8644 * iemMemMap worker that deals with iemMemPageMap failures.
8645 */
8646IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8647 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8648{
8649 /*
8650 * Filter out conditions we can handle and the ones which shouldn't happen.
8651 */
8652 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8653 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8654 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8655 {
8656 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8657 return rcMap;
8658 }
8659 pVCpu->iem.s.cPotentialExits++;
8660
8661 /*
8662 * Read in the current memory content if it's a read, execute or partial
8663 * write access.
8664 */
8665 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8666 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8667 {
8668 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8669 memset(pbBuf, 0xff, cbMem);
8670 else
8671 {
8672 int rc;
8673 if (!pVCpu->iem.s.fBypassHandlers)
8674 {
8675 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8676 if (rcStrict == VINF_SUCCESS)
8677 { /* nothing */ }
8678 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8679 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8680 else
8681 {
8682 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8683 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8684 return rcStrict;
8685 }
8686 }
8687 else
8688 {
8689 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8690 if (RT_SUCCESS(rc))
8691 { /* likely */ }
8692 else
8693 {
8694 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8695 GCPhysFirst, rc));
8696 return rc;
8697 }
8698 }
8699 }
8700
8701#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8702 if ( !pVCpu->iem.s.fNoRem
8703 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8704 {
8705 /*
8706 * Record the read.
8707 */
8708 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8709 if (pEvtRec)
8710 {
8711 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8712 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8713 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8714 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8715 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8716 }
8717 }
8718#endif
8719 }
8720#ifdef VBOX_STRICT
8721 else
8722 memset(pbBuf, 0xcc, cbMem);
8723#endif
8724#ifdef VBOX_STRICT
8725 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8726 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8727#endif
8728
8729 /*
8730 * Commit the bounce buffer entry.
8731 */
8732 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8733 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8734 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8735 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8736 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8737 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8738 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8739 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8740 pVCpu->iem.s.cActiveMappings++;
8741
8742 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8743 *ppvMem = pbBuf;
8744 return VINF_SUCCESS;
8745}
8746
8747
8748
8749/**
8750 * Maps the specified guest memory for the given kind of access.
8751 *
8752 * This may be using bounce buffering of the memory if it's crossing a page
8753 * boundary or if there is an access handler installed for any of it. Because
8754 * of lock prefix guarantees, we're in for some extra clutter when this
8755 * happens.
8756 *
8757 * This may raise a \#GP, \#SS, \#PF or \#AC.
8758 *
8759 * @returns VBox strict status code.
8760 *
8761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8762 * @param ppvMem Where to return the pointer to the mapped
8763 * memory.
8764 * @param cbMem The number of bytes to map. This is usually 1,
8765 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8766 * string operations it can be up to a page.
8767 * @param iSegReg The index of the segment register to use for
8768 * this access. The base and limits are checked.
8769 * Use UINT8_MAX to indicate that no segmentation
8770 * is required (for IDT, GDT and LDT accesses).
8771 * @param GCPtrMem The address of the guest memory.
8772 * @param fAccess How the memory is being accessed. The
8773 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8774 * how to map the memory, while the
8775 * IEM_ACCESS_WHAT_XXX bit is used when raising
8776 * exceptions.
8777 */
8778IEM_STATIC VBOXSTRICTRC
8779iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8780{
8781 /*
8782 * Check the input and figure out which mapping entry to use.
8783 */
8784 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8785 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8786 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8787
8788 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8789 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8790 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8791 {
8792 iMemMap = iemMemMapFindFree(pVCpu);
8793 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8794 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8795 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8796 pVCpu->iem.s.aMemMappings[2].fAccess),
8797 VERR_IEM_IPE_9);
8798 }
8799
8800 /*
8801 * Map the memory, checking that we can actually access it. If something
8802 * slightly complicated happens, fall back on bounce buffering.
8803 */
8804 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8805 if (rcStrict != VINF_SUCCESS)
8806 return rcStrict;
8807
8808 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8809 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8810
8811 RTGCPHYS GCPhysFirst;
8812 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8813 if (rcStrict != VINF_SUCCESS)
8814 return rcStrict;
8815
8816 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8817 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8818 if (fAccess & IEM_ACCESS_TYPE_READ)
8819 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8820
8821 void *pvMem;
8822 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8823 if (rcStrict != VINF_SUCCESS)
8824 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8825
8826 /*
8827 * Fill in the mapping table entry.
8828 */
8829 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8830 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8831 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8832 pVCpu->iem.s.cActiveMappings++;
8833
8834 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8835 *ppvMem = pvMem;
8836 return VINF_SUCCESS;
8837}
8838
8839
8840/**
8841 * Commits the guest memory if bounce buffered and unmaps it.
8842 *
8843 * @returns Strict VBox status code.
8844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8845 * @param pvMem The mapping.
8846 * @param fAccess The kind of access.
8847 */
8848IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8849{
8850 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8851 AssertReturn(iMemMap >= 0, iMemMap);
8852
8853 /* If it's bounce buffered, we may need to write back the buffer. */
8854 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8855 {
8856 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8857 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8858 }
8859 /* Otherwise unlock it. */
8860 else
8861 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8862
8863 /* Free the entry. */
8864 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8865 Assert(pVCpu->iem.s.cActiveMappings != 0);
8866 pVCpu->iem.s.cActiveMappings--;
8867 return VINF_SUCCESS;
8868}
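
/*
 * Illustrative sketch (not part of the build): the typical store pattern an
 * instruction implementation builds on top of iemMemMap and
 * iemMemCommitAndUnmap.  The GCPtrEff and u16Value names are assumptions made
 * purely for illustration.
 *
 *      uint16_t    *pu16Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
 *                                        X86_SREG_ES, GCPtrEff, IEM_ACCESS_DATA_W);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      *pu16Dst = u16Value;
 *      return iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
 */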
8869
8870#ifdef IEM_WITH_SETJMP
8871
8872/**
8873 * Maps the specified guest memory for the given kind of access, longjmp on
8874 * error.
8875 *
8876 * This may be using bounce buffering of the memory if it's crossing a page
8877 * boundary or if there is an access handler installed for any of it. Because
8878 * of lock prefix guarantees, we're in for some extra clutter when this
8879 * happens.
8880 *
8881 * This may raise a \#GP, \#SS, \#PF or \#AC.
8882 *
8883 * @returns Pointer to the mapped memory.
8884 *
8885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8886 * @param cbMem The number of bytes to map. This is usually 1,
8887 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8888 * string operations it can be up to a page.
8889 * @param iSegReg The index of the segment register to use for
8890 * this access. The base and limits are checked.
8891 * Use UINT8_MAX to indicate that no segmentation
8892 * is required (for IDT, GDT and LDT accesses).
8893 * @param GCPtrMem The address of the guest memory.
8894 * @param fAccess How the memory is being accessed. The
8895 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8896 * how to map the memory, while the
8897 * IEM_ACCESS_WHAT_XXX bit is used when raising
8898 * exceptions.
8899 */
8900IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8901{
8902 /*
8903 * Check the input and figure out which mapping entry to use.
8904 */
8905 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8906 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8907 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8908
8909 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8910 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8911 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8912 {
8913 iMemMap = iemMemMapFindFree(pVCpu);
8914 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8915 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8916 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8917 pVCpu->iem.s.aMemMappings[2].fAccess),
8918 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8919 }
8920
8921 /*
8922 * Map the memory, checking that we can actually access it. If something
8923 * slightly complicated happens, fall back on bounce buffering.
8924 */
8925 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8926 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8927 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8928
8929 /* Crossing a page boundary? */
8930 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8931 { /* No (likely). */ }
8932 else
8933 {
8934 void *pvMem;
8935 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8936 if (rcStrict == VINF_SUCCESS)
8937 return pvMem;
8938 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8939 }
8940
8941 RTGCPHYS GCPhysFirst;
8942 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8943 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8944 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8945
8946 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8947 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8948 if (fAccess & IEM_ACCESS_TYPE_READ)
8949 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8950
8951 void *pvMem;
8952 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8953 if (rcStrict == VINF_SUCCESS)
8954 { /* likely */ }
8955 else
8956 {
8957 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8958 if (rcStrict == VINF_SUCCESS)
8959 return pvMem;
8960 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8961 }
8962
8963 /*
8964 * Fill in the mapping table entry.
8965 */
8966 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8967 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8968 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8969 pVCpu->iem.s.cActiveMappings++;
8970
8971 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8972 return pvMem;
8973}
8974
8975
8976/**
8977 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8978 *
8979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8980 * @param pvMem The mapping.
8981 * @param fAccess The kind of access.
8982 */
8983IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8984{
8985 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8986 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8987
8988 /* If it's bounce buffered, we may need to write back the buffer. */
8989 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8990 {
8991 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8992 {
8993 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8994 if (rcStrict == VINF_SUCCESS)
8995 return;
8996 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8997 }
8998 }
8999 /* Otherwise unlock it. */
9000 else
9001 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9002
9003 /* Free the entry. */
9004 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9005 Assert(pVCpu->iem.s.cActiveMappings != 0);
9006 pVCpu->iem.s.cActiveMappings--;
9007}
9008
9009#endif
9010
9011#ifndef IN_RING3
9012/**
9013 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9014 * buffer part shows trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM).
9015 *
9016 * Allows the instruction to be completed and retired, while the IEM user will
9017 * return to ring-3 immediately afterwards and do the postponed writes there.
9018 *
9019 * @returns VBox status code (no strict statuses). Caller must check
9020 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9021 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9022 * @param pvMem The mapping.
9023 * @param fAccess The kind of access.
9024 */
9025IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9026{
9027 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9028 AssertReturn(iMemMap >= 0, iMemMap);
9029
9030 /* If it's bounce buffered, we may need to write back the buffer. */
9031 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9032 {
9033 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9034 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9035 }
9036 /* Otherwise unlock it. */
9037 else
9038 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9039
9040 /* Free the entry. */
9041 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9042 Assert(pVCpu->iem.s.cActiveMappings != 0);
9043 pVCpu->iem.s.cActiveMappings--;
9044 return VINF_SUCCESS;
9045}
9046#endif
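
/*
 * Illustrative sketch (not part of the build): how a ring-0/raw-mode caller of
 * the postponing variant might honour the VMCPU_FF_IEM check mentioned above
 * before repeating a string instruction.  pvMem is an assumed prior mapping.
 *
 *      VBOXSTRICTRC rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvMem, IEM_ACCESS_DATA_W);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          return VINF_SUCCESS;    // stop repeating; ring-3 will perform the postponed write(s)
 */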
9047
9048
9049/**
9050 * Rolls back mappings, releasing page locks and such.
9051 *
9052 * The caller shall only call this after checking cActiveMappings.
9053 *
9055 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9056 */
9057IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9058{
9059 Assert(pVCpu->iem.s.cActiveMappings > 0);
9060
9061 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9062 while (iMemMap-- > 0)
9063 {
9064 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9065 if (fAccess != IEM_ACCESS_INVALID)
9066 {
9067 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9068 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9069 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9070 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9071 Assert(pVCpu->iem.s.cActiveMappings > 0);
9072 pVCpu->iem.s.cActiveMappings--;
9073 }
9074 }
9075}
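
/*
 * Illustrative sketch (not part of the build): the calling pattern implied by
 * the note above - only roll back when something failed and there are still
 * active mappings to release.
 *
 *      if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *          iemMemRollback(pVCpu);
 */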
9076
9077
9078/**
9079 * Fetches a data byte.
9080 *
9081 * @returns Strict VBox status code.
9082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9083 * @param pu8Dst Where to return the byte.
9084 * @param iSegReg The index of the segment register to use for
9085 * this access. The base and limits are checked.
9086 * @param GCPtrMem The address of the guest memory.
9087 */
9088IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9089{
9090 /* The lazy approach for now... */
9091 uint8_t const *pu8Src;
9092 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9093 if (rc == VINF_SUCCESS)
9094 {
9095 *pu8Dst = *pu8Src;
9096 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9097 }
9098 return rc;
9099}
9100
9101
9102#ifdef IEM_WITH_SETJMP
9103/**
9104 * Fetches a data byte, longjmp on error.
9105 *
9106 * @returns The byte.
9107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9108 * @param iSegReg The index of the segment register to use for
9109 * this access. The base and limits are checked.
9110 * @param GCPtrMem The address of the guest memory.
9111 */
9112DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9113{
9114 /* The lazy approach for now... */
9115 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9116 uint8_t const bRet = *pu8Src;
9117 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9118 return bRet;
9119}
9120#endif /* IEM_WITH_SETJMP */
9121
9122
9123/**
9124 * Fetches a data word.
9125 *
9126 * @returns Strict VBox status code.
9127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9128 * @param pu16Dst Where to return the word.
9129 * @param iSegReg The index of the segment register to use for
9130 * this access. The base and limits are checked.
9131 * @param GCPtrMem The address of the guest memory.
9132 */
9133IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9134{
9135 /* The lazy approach for now... */
9136 uint16_t const *pu16Src;
9137 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9138 if (rc == VINF_SUCCESS)
9139 {
9140 *pu16Dst = *pu16Src;
9141 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9142 }
9143 return rc;
9144}
9145
9146
9147#ifdef IEM_WITH_SETJMP
9148/**
9149 * Fetches a data word, longjmp on error.
9150 *
9151 * @returns The word.
9152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9153 * @param iSegReg The index of the segment register to use for
9154 * this access. The base and limits are checked.
9155 * @param GCPtrMem The address of the guest memory.
9156 */
9157DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9158{
9159 /* The lazy approach for now... */
9160 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9161 uint16_t const u16Ret = *pu16Src;
9162 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9163 return u16Ret;
9164}
9165#endif
9166
9167
9168/**
9169 * Fetches a data dword.
9170 *
9171 * @returns Strict VBox status code.
9172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9173 * @param pu32Dst Where to return the dword.
9174 * @param iSegReg The index of the segment register to use for
9175 * this access. The base and limits are checked.
9176 * @param GCPtrMem The address of the guest memory.
9177 */
9178IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9179{
9180 /* The lazy approach for now... */
9181 uint32_t const *pu32Src;
9182 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9183 if (rc == VINF_SUCCESS)
9184 {
9185 *pu32Dst = *pu32Src;
9186 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9187 }
9188 return rc;
9189}
9190
9191
9192#ifdef IEM_WITH_SETJMP
9193
9194IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9195{
9196 Assert(cbMem >= 1);
9197 Assert(iSegReg < X86_SREG_COUNT);
9198
9199 /*
9200 * 64-bit mode is simpler.
9201 */
9202 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9203 {
9204 if (iSegReg >= X86_SREG_FS)
9205 {
9206 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9207 GCPtrMem += pSel->u64Base;
9208 }
9209
9210 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9211 return GCPtrMem;
9212 }
9213 /*
9214 * 16-bit and 32-bit segmentation.
9215 */
9216 else
9217 {
9218 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9219 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9220 == X86DESCATTR_P /* data, expand up */
9221 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9222 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9223 {
9224 /* expand up */
9225 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9226 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9227 && GCPtrLast32 > (uint32_t)GCPtrMem))
9228 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9229 }
9230 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9231 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9232 {
9233 /* expand down */
9234 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9235 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9236 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9237 && GCPtrLast32 > (uint32_t)GCPtrMem))
9238 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9239 }
9240 else
9241 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9242 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9243 }
9244 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9245}
9246
9247
9248IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9249{
9250 Assert(cbMem >= 1);
9251 Assert(iSegReg < X86_SREG_COUNT);
9252
9253 /*
9254 * 64-bit mode is simpler.
9255 */
9256 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9257 {
9258 if (iSegReg >= X86_SREG_FS)
9259 {
9260 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9261 GCPtrMem += pSel->u64Base;
9262 }
9263
9264 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9265 return GCPtrMem;
9266 }
9267 /*
9268 * 16-bit and 32-bit segmentation.
9269 */
9270 else
9271 {
9272 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9273 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9274 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9275 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9276 {
9277 /* expand up */
9278 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9279 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9280 && GCPtrLast32 > (uint32_t)GCPtrMem))
9281 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9282 }
9283        else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9284 {
9285 /* expand down */
9286 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9287 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9288 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9289 && GCPtrLast32 > (uint32_t)GCPtrMem))
9290 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9291 }
9292 else
9293 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9294 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9295 }
9296 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9297}
9298
9299
9300/**
9301 * Fetches a data dword, longjmp on error, fallback/safe version.
9302 *
9303 * @returns The dword.
9304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9305 * @param iSegReg The index of the segment register to use for
9306 * this access. The base and limits are checked.
9307 * @param GCPtrMem The address of the guest memory.
9308 */
9309IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9310{
9311 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9312 uint32_t const u32Ret = *pu32Src;
9313 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9314 return u32Ret;
9315}
9316
9317
9318/**
9319 * Fetches a data dword, longjmp on error.
9320 *
9321 * @returns The dword.
9322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9323 * @param iSegReg The index of the segment register to use for
9324 * this access. The base and limits are checked.
9325 * @param GCPtrMem The address of the guest memory.
9326 */
9327DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9328{
9329# ifdef IEM_WITH_DATA_TLB
9330 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9331 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9332 {
9333 /// @todo more later.
9334 }
9335
9336 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9337# else
9338 /* The lazy approach. */
9339 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9340 uint32_t const u32Ret = *pu32Src;
9341 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9342 return u32Ret;
9343# endif
9344}
9345#endif
9346
9347
9348#ifdef SOME_UNUSED_FUNCTION
9349/**
9350 * Fetches a data dword and sign extends it to a qword.
9351 *
9352 * @returns Strict VBox status code.
9353 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9354 * @param pu64Dst Where to return the sign extended value.
9355 * @param iSegReg The index of the segment register to use for
9356 * this access. The base and limits are checked.
9357 * @param GCPtrMem The address of the guest memory.
9358 */
9359IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9360{
9361 /* The lazy approach for now... */
9362 int32_t const *pi32Src;
9363 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9364 if (rc == VINF_SUCCESS)
9365 {
9366 *pu64Dst = *pi32Src;
9367 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9368 }
9369#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9370 else
9371 *pu64Dst = 0;
9372#endif
9373 return rc;
9374}
9375#endif
9376
9377
9378/**
9379 * Fetches a data qword.
9380 *
9381 * @returns Strict VBox status code.
9382 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9383 * @param pu64Dst Where to return the qword.
9384 * @param iSegReg The index of the segment register to use for
9385 * this access. The base and limits are checked.
9386 * @param GCPtrMem The address of the guest memory.
9387 */
9388IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9389{
9390 /* The lazy approach for now... */
9391 uint64_t const *pu64Src;
9392 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9393 if (rc == VINF_SUCCESS)
9394 {
9395 *pu64Dst = *pu64Src;
9396 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9397 }
9398 return rc;
9399}
9400
9401
9402#ifdef IEM_WITH_SETJMP
9403/**
9404 * Fetches a data qword, longjmp on error.
9405 *
9406 * @returns The qword.
9407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9408 * @param iSegReg The index of the segment register to use for
9409 * this access. The base and limits are checked.
9410 * @param GCPtrMem The address of the guest memory.
9411 */
9412DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9413{
9414 /* The lazy approach for now... */
9415 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9416 uint64_t const u64Ret = *pu64Src;
9417 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9418 return u64Ret;
9419}
9420#endif
9421
9422
9423/**
9424 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9425 *
9426 * @returns Strict VBox status code.
9427 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9428 * @param pu64Dst Where to return the qword.
9429 * @param iSegReg The index of the segment register to use for
9430 * this access. The base and limits are checked.
9431 * @param GCPtrMem The address of the guest memory.
9432 */
9433IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9434{
9435 /* The lazy approach for now... */
9436 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9437 if (RT_UNLIKELY(GCPtrMem & 15))
9438 return iemRaiseGeneralProtectionFault0(pVCpu);
9439
9440 uint64_t const *pu64Src;
9441 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9442 if (rc == VINF_SUCCESS)
9443 {
9444 *pu64Dst = *pu64Src;
9445 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9446 }
9447 return rc;
9448}
9449
9450
9451#ifdef IEM_WITH_SETJMP
9452/**
9453 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9454 *
9455 * @returns The qword.
9456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9457 * @param iSegReg The index of the segment register to use for
9458 * this access. The base and limits are checked.
9459 * @param GCPtrMem The address of the guest memory.
9460 */
9461DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9462{
9463 /* The lazy approach for now... */
9464 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9465 if (RT_LIKELY(!(GCPtrMem & 15)))
9466 {
9467 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9468 uint64_t const u64Ret = *pu64Src;
9469 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9470 return u64Ret;
9471 }
9472
9473 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9474 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9475}
9476#endif
9477
9478
9479/**
9480 * Fetches a data tword.
9481 *
9482 * @returns Strict VBox status code.
9483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9484 * @param pr80Dst Where to return the tword.
9485 * @param iSegReg The index of the segment register to use for
9486 * this access. The base and limits are checked.
9487 * @param GCPtrMem The address of the guest memory.
9488 */
9489IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9490{
9491 /* The lazy approach for now... */
9492 PCRTFLOAT80U pr80Src;
9493 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9494 if (rc == VINF_SUCCESS)
9495 {
9496 *pr80Dst = *pr80Src;
9497 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9498 }
9499 return rc;
9500}
9501
9502
9503#ifdef IEM_WITH_SETJMP
9504/**
9505 * Fetches a data tword, longjmp on error.
9506 *
9507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9508 * @param pr80Dst Where to return the tword.
9509 * @param iSegReg The index of the segment register to use for
9510 * this access. The base and limits are checked.
9511 * @param GCPtrMem The address of the guest memory.
9512 */
9513DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9514{
9515 /* The lazy approach for now... */
9516 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9517 *pr80Dst = *pr80Src;
9518 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9519}
9520#endif
9521
9522
9523/**
9524 * Fetches a data dqword (double qword), generally SSE related.
9525 *
9526 * @returns Strict VBox status code.
9527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9528 * @param pu128Dst Where to return the dqword.
9529 * @param iSegReg The index of the segment register to use for
9530 * this access. The base and limits are checked.
9531 * @param GCPtrMem The address of the guest memory.
9532 */
9533IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9534{
9535 /* The lazy approach for now... */
9536 PCRTUINT128U pu128Src;
9537 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9538 if (rc == VINF_SUCCESS)
9539 {
9540 pu128Dst->au64[0] = pu128Src->au64[0];
9541 pu128Dst->au64[1] = pu128Src->au64[1];
9542 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9543 }
9544 return rc;
9545}
9546
9547
9548#ifdef IEM_WITH_SETJMP
9549/**
9550 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9551 *
9552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9553 * @param pu128Dst Where to return the dqword.
9554 * @param iSegReg The index of the segment register to use for
9555 * this access. The base and limits are checked.
9556 * @param GCPtrMem The address of the guest memory.
9557 */
9558IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9559{
9560 /* The lazy approach for now... */
9561 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9562 pu128Dst->au64[0] = pu128Src->au64[0];
9563 pu128Dst->au64[1] = pu128Src->au64[1];
9564 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9565}
9566#endif
9567
9568
9569/**
9570 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9571 * related.
9572 *
9573 * Raises \#GP(0) if not aligned.
9574 *
9575 * @returns Strict VBox status code.
9576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9577 * @param pu128Dst Where to return the dqword.
9578 * @param iSegReg The index of the segment register to use for
9579 * this access. The base and limits are checked.
9580 * @param GCPtrMem The address of the guest memory.
9581 */
9582IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9583{
9584 /* The lazy approach for now... */
9585 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9586 if ( (GCPtrMem & 15)
9587 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9588 return iemRaiseGeneralProtectionFault0(pVCpu);
9589
9590 PCRTUINT128U pu128Src;
9591 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9592 if (rc == VINF_SUCCESS)
9593 {
9594 pu128Dst->au64[0] = pu128Src->au64[0];
9595 pu128Dst->au64[1] = pu128Src->au64[1];
9596 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9597 }
9598 return rc;
9599}
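/*
 * Alignment note (illustrative sketch): the aligned SSE fetch above only
 * raises \#GP(0) for a misaligned address when MXCSR.MM is clear, i.e. when
 * the misaligned-SSE relaxation is not enabled.  A MOVAPS-style caller would
 * simply do the following; the variable names are assumptions for the example.
 */
#if 0
    RTUINT128U uSrc;
    VBOXSTRICTRC rcStrict = iemMemFetchDataU128AlignedSse(pVCpu, &uSrc, iEffSeg, GCPtrEffSrc);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;        /* #GP(0) on misalignment unless MXCSR.MM is set. */
#endif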
9600
9601
9602#ifdef IEM_WITH_SETJMP
9603/**
9604 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9605 * related, longjmp on error.
9606 *
9607 * Raises \#GP(0) if not aligned.
9608 *
9609 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9610 * @param pu128Dst Where to return the dqword.
9611 * @param iSegReg The index of the segment register to use for
9612 * this access. The base and limits are checked.
9613 * @param GCPtrMem The address of the guest memory.
9614 */
9615DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9616{
9617 /* The lazy approach for now... */
9618 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9619 if ( (GCPtrMem & 15) == 0
9620 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9621 {
9622 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9623 pu128Dst->au64[0] = pu128Src->au64[0];
9624 pu128Dst->au64[1] = pu128Src->au64[1];
9625 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9626 return;
9627 }
9628
9629 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9630 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9631}
9632#endif
9633
9634
9635/**
9636 * Fetches a data oword (octo word), generally AVX related.
9637 *
9638 * @returns Strict VBox status code.
9639 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9640 * @param pu256Dst Where to return the oword.
9641 * @param iSegReg The index of the segment register to use for
9642 * this access. The base and limits are checked.
9643 * @param GCPtrMem The address of the guest memory.
9644 */
9645IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9646{
9647 /* The lazy approach for now... */
9648 PCRTUINT256U pu256Src;
9649 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9650 if (rc == VINF_SUCCESS)
9651 {
9652 pu256Dst->au64[0] = pu256Src->au64[0];
9653 pu256Dst->au64[1] = pu256Src->au64[1];
9654 pu256Dst->au64[2] = pu256Src->au64[2];
9655 pu256Dst->au64[3] = pu256Src->au64[3];
9656 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9657 }
9658 return rc;
9659}
9660
9661
9662#ifdef IEM_WITH_SETJMP
9663/**
9664 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9665 *
9666 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9667 * @param pu256Dst Where to return the oword.
9668 * @param iSegReg The index of the segment register to use for
9669 * this access. The base and limits are checked.
9670 * @param GCPtrMem The address of the guest memory.
9671 */
9672IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9673{
9674 /* The lazy approach for now... */
9675 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9676 pu256Dst->au64[0] = pu256Src->au64[0];
9677 pu256Dst->au64[1] = pu256Src->au64[1];
9678 pu256Dst->au64[2] = pu256Src->au64[2];
9679 pu256Dst->au64[3] = pu256Src->au64[3];
9680 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9681}
9682#endif
9683
9684
9685/**
9686 * Fetches a data oword (octo word) at an aligned address, generally AVX
9687 * related.
9688 *
9689 * Raises \#GP(0) if not aligned.
9690 *
9691 * @returns Strict VBox status code.
9692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9693 * @param pu256Dst Where to return the oword.
9694 * @param iSegReg The index of the segment register to use for
9695 * this access. The base and limits are checked.
9696 * @param GCPtrMem The address of the guest memory.
9697 */
9698IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9699{
9700 /* The lazy approach for now... */
9701 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9702 if (GCPtrMem & 31)
9703 return iemRaiseGeneralProtectionFault0(pVCpu);
9704
9705 PCRTUINT256U pu256Src;
9706 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9707 if (rc == VINF_SUCCESS)
9708 {
9709 pu256Dst->au64[0] = pu256Src->au64[0];
9710 pu256Dst->au64[1] = pu256Src->au64[1];
9711 pu256Dst->au64[2] = pu256Src->au64[2];
9712 pu256Dst->au64[3] = pu256Src->au64[3];
9713 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9714 }
9715 return rc;
9716}
9717
9718
9719#ifdef IEM_WITH_SETJMP
9720/**
9721 * Fetches a data oword (octo word) at an aligned address, generally AVX
9722 * related, longjmp on error.
9723 *
9724 * Raises \#GP(0) if not aligned.
9725 *
9726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9727 * @param pu256Dst Where to return the oword.
9728 * @param iSegReg The index of the segment register to use for
9729 * this access. The base and limits are checked.
9730 * @param GCPtrMem The address of the guest memory.
9731 */
9732DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9733{
9734 /* The lazy approach for now... */
9735 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9736 if ((GCPtrMem & 31) == 0)
9737 {
9738 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9739 pu256Dst->au64[0] = pu256Src->au64[0];
9740 pu256Dst->au64[1] = pu256Src->au64[1];
9741 pu256Dst->au64[2] = pu256Src->au64[2];
9742 pu256Dst->au64[3] = pu256Src->au64[3];
9743 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9744 return;
9745 }
9746
9747 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9748 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9749}
9750#endif
9751
9752
9753
9754/**
9755 * Fetches a descriptor register (lgdt, lidt).
9756 *
9757 * @returns Strict VBox status code.
9758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9759 * @param pcbLimit Where to return the limit.
9760 * @param pGCPtrBase Where to return the base.
9761 * @param iSegReg The index of the segment register to use for
9762 * this access. The base and limits are checked.
9763 * @param GCPtrMem The address of the guest memory.
9764 * @param enmOpSize The effective operand size.
9765 */
9766IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9767 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9768{
9769 /*
9770 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9771 * little special:
9772 * - The two reads are done separately.
9773 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9774 * - We suspect the 386 to actually commit the limit before the base in
9775 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9776 * don't try to emulate this eccentric behavior, because it's not well
9777 * enough understood and rather hard to trigger.
9778 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9779 */
9780 VBOXSTRICTRC rcStrict;
9781 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9782 {
9783 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9784 if (rcStrict == VINF_SUCCESS)
9785 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9786 }
9787 else
9788 {
9789 uint32_t uTmp = 0; /* (otherwise Visual C++ may warn about it being used uninitialized) */
9790 if (enmOpSize == IEMMODE_32BIT)
9791 {
9792 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9793 {
9794 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9795 if (rcStrict == VINF_SUCCESS)
9796 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9797 }
9798 else
9799 {
9800 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9801 if (rcStrict == VINF_SUCCESS)
9802 {
9803 *pcbLimit = (uint16_t)uTmp;
9804 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9805 }
9806 }
9807 if (rcStrict == VINF_SUCCESS)
9808 *pGCPtrBase = uTmp;
9809 }
9810 else
9811 {
9812 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9813 if (rcStrict == VINF_SUCCESS)
9814 {
9815 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9816 if (rcStrict == VINF_SUCCESS)
9817 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9818 }
9819 }
9820 }
9821 return rcStrict;
9822}
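/*
 * Usage sketch (illustrative only): LGDT/LIDT style callers fetch the 16-bit
 * limit followed by the base and let the helper above apply the CPU and
 * operand size quirks.  The IEMMODE value and the local variable names are
 * assumptions for the example.
 */
#if 0
    uint16_t cbLimit   = 0;
    RTGCPTR  GCPtrBase = 0;
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, IEMMODE_32BIT);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* cbLimit now holds the word read at GCPtrEffSrc, GCPtrBase the (possibly
       24-bit masked) base read from GCPtrEffSrc + 2. */
#endif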
9823
9824
9825
9826/**
9827 * Stores a data byte.
9828 *
9829 * @returns Strict VBox status code.
9830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9831 * @param iSegReg The index of the segment register to use for
9832 * this access. The base and limits are checked.
9833 * @param GCPtrMem The address of the guest memory.
9834 * @param u8Value The value to store.
9835 */
9836IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9837{
9838 /* The lazy approach for now... */
9839 uint8_t *pu8Dst;
9840 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9841 if (rc == VINF_SUCCESS)
9842 {
9843 *pu8Dst = u8Value;
9844 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9845 }
9846 return rc;
9847}
9848
9849
9850#ifdef IEM_WITH_SETJMP
9851/**
9852 * Stores a data byte, longjmp on error.
9853 *
9854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9855 * @param iSegReg The index of the segment register to use for
9856 * this access. The base and limits are checked.
9857 * @param GCPtrMem The address of the guest memory.
9858 * @param u8Value The value to store.
9859 */
9860IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9861{
9862 /* The lazy approach for now... */
9863 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9864 *pu8Dst = u8Value;
9865 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9866}
9867#endif
9868
9869
9870/**
9871 * Stores a data word.
9872 *
9873 * @returns Strict VBox status code.
9874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9875 * @param iSegReg The index of the segment register to use for
9876 * this access. The base and limits are checked.
9877 * @param GCPtrMem The address of the guest memory.
9878 * @param u16Value The value to store.
9879 */
9880IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9881{
9882 /* The lazy approach for now... */
9883 uint16_t *pu16Dst;
9884 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9885 if (rc == VINF_SUCCESS)
9886 {
9887 *pu16Dst = u16Value;
9888 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9889 }
9890 return rc;
9891}
9892
9893
9894#ifdef IEM_WITH_SETJMP
9895/**
9896 * Stores a data word, longjmp on error.
9897 *
9898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9899 * @param iSegReg The index of the segment register to use for
9900 * this access. The base and limits are checked.
9901 * @param GCPtrMem The address of the guest memory.
9902 * @param u16Value The value to store.
9903 */
9904IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9905{
9906 /* The lazy approach for now... */
9907 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9908 *pu16Dst = u16Value;
9909 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9910}
9911#endif
9912
9913
9914/**
9915 * Stores a data dword.
9916 *
9917 * @returns Strict VBox status code.
9918 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9919 * @param iSegReg The index of the segment register to use for
9920 * this access. The base and limits are checked.
9921 * @param GCPtrMem The address of the guest memory.
9922 * @param u32Value The value to store.
9923 */
9924IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9925{
9926 /* The lazy approach for now... */
9927 uint32_t *pu32Dst;
9928 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9929 if (rc == VINF_SUCCESS)
9930 {
9931 *pu32Dst = u32Value;
9932 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9933 }
9934 return rc;
9935}
9936
9937
9938#ifdef IEM_WITH_SETJMP
9939/**
9940 * Stores a data dword, longjmp on error.
9941 *
9943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9944 * @param iSegReg The index of the segment register to use for
9945 * this access. The base and limits are checked.
9946 * @param GCPtrMem The address of the guest memory.
9947 * @param u32Value The value to store.
9948 */
9949IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9950{
9951 /* The lazy approach for now... */
9952 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9953 *pu32Dst = u32Value;
9954 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9955}
9956#endif
9957
9958
9959/**
9960 * Stores a data qword.
9961 *
9962 * @returns Strict VBox status code.
9963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9964 * @param iSegReg The index of the segment register to use for
9965 * this access. The base and limits are checked.
9966 * @param GCPtrMem The address of the guest memory.
9967 * @param u64Value The value to store.
9968 */
9969IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9970{
9971 /* The lazy approach for now... */
9972 uint64_t *pu64Dst;
9973 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9974 if (rc == VINF_SUCCESS)
9975 {
9976 *pu64Dst = u64Value;
9977 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9978 }
9979 return rc;
9980}
9981
9982
9983#ifdef IEM_WITH_SETJMP
9984/**
9985 * Stores a data qword, longjmp on error.
9986 *
9987 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9988 * @param iSegReg The index of the segment register to use for
9989 * this access. The base and limits are checked.
9990 * @param GCPtrMem The address of the guest memory.
9991 * @param u64Value The value to store.
9992 */
9993IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9994{
9995 /* The lazy approach for now... */
9996 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9997 *pu64Dst = u64Value;
9998 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9999}
10000#endif
10001
10002
10003/**
10004 * Stores a data dqword.
10005 *
10006 * @returns Strict VBox status code.
10007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10008 * @param iSegReg The index of the segment register to use for
10009 * this access. The base and limits are checked.
10010 * @param GCPtrMem The address of the guest memory.
10011 * @param u128Value The value to store.
10012 */
10013IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10014{
10015 /* The lazy approach for now... */
10016 PRTUINT128U pu128Dst;
10017 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10018 if (rc == VINF_SUCCESS)
10019 {
10020 pu128Dst->au64[0] = u128Value.au64[0];
10021 pu128Dst->au64[1] = u128Value.au64[1];
10022 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10023 }
10024 return rc;
10025}
10026
10027
10028#ifdef IEM_WITH_SETJMP
10029/**
10030 * Stores a data dqword, longjmp on error.
10031 *
10032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10033 * @param iSegReg The index of the segment register to use for
10034 * this access. The base and limits are checked.
10035 * @param GCPtrMem The address of the guest memory.
10036 * @param u128Value The value to store.
10037 */
10038IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10039{
10040 /* The lazy approach for now... */
10041 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10042 pu128Dst->au64[0] = u128Value.au64[0];
10043 pu128Dst->au64[1] = u128Value.au64[1];
10044 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10045}
10046#endif
10047
10048
10049/**
10050 * Stores a data dqword, SSE aligned.
10051 *
10052 * @returns Strict VBox status code.
10053 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10054 * @param iSegReg The index of the segment register to use for
10055 * this access. The base and limits are checked.
10056 * @param GCPtrMem The address of the guest memory.
10057 * @param u128Value The value to store.
10058 */
10059IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10060{
10061 /* The lazy approach for now... */
10062 if ( (GCPtrMem & 15)
10063 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10064 return iemRaiseGeneralProtectionFault0(pVCpu);
10065
10066 PRTUINT128U pu128Dst;
10067 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10068 if (rc == VINF_SUCCESS)
10069 {
10070 pu128Dst->au64[0] = u128Value.au64[0];
10071 pu128Dst->au64[1] = u128Value.au64[1];
10072 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10073 }
10074 return rc;
10075}
10076
10077
10078#ifdef IEM_WITH_SETJMP
10079/**
10080 * Stores a data dqword, SSE aligned, longjmp on error.
10081 *
10083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10084 * @param iSegReg The index of the segment register to use for
10085 * this access. The base and limits are checked.
10086 * @param GCPtrMem The address of the guest memory.
10087 * @param u128Value The value to store.
10088 */
10089DECL_NO_INLINE(IEM_STATIC, void)
10090iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10091{
10092 /* The lazy approach for now... */
10093 if ( (GCPtrMem & 15) == 0
10094 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10095 {
10096 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10097 pu128Dst->au64[0] = u128Value.au64[0];
10098 pu128Dst->au64[1] = u128Value.au64[1];
10099 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10100 return;
10101 }
10102
10103 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10104 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10105}
10106#endif
10107
10108
10109/**
10110 * Stores a data oword (octo word), generally AVX related.
10111 *
10112 * @returns Strict VBox status code.
10113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10114 * @param iSegReg The index of the segment register to use for
10115 * this access. The base and limits are checked.
10116 * @param GCPtrMem The address of the guest memory.
10117 * @param pu256Value Pointer to the value to store.
10118 */
10119IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10120{
10121 /* The lazy approach for now... */
10122 PRTUINT256U pu256Dst;
10123 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10124 if (rc == VINF_SUCCESS)
10125 {
10126 pu256Dst->au64[0] = pu256Value->au64[0];
10127 pu256Dst->au64[1] = pu256Value->au64[1];
10128 pu256Dst->au64[2] = pu256Value->au64[2];
10129 pu256Dst->au64[3] = pu256Value->au64[3];
10130 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10131 }
10132 return rc;
10133}
10134
10135
10136#ifdef IEM_WITH_SETJMP
10137/**
10138 * Stores a data oword (octo word), generally AVX related, longjmp on error.
10139 *
10140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10141 * @param iSegReg The index of the segment register to use for
10142 * this access. The base and limits are checked.
10143 * @param GCPtrMem The address of the guest memory.
10144 * @param pu256Value Pointer to the value to store.
10145 */
10146IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10147{
10148 /* The lazy approach for now... */
10149 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10150 pu256Dst->au64[0] = pu256Value->au64[0];
10151 pu256Dst->au64[1] = pu256Value->au64[1];
10152 pu256Dst->au64[2] = pu256Value->au64[2];
10153 pu256Dst->au64[3] = pu256Value->au64[3];
10154 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10155}
10156#endif
10157
10158
10159/**
10160 * Stores a data oword (octo word), AVX aligned.
10161 *
10162 * @returns Strict VBox status code.
10163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10164 * @param iSegReg The index of the segment register to use for
10165 * this access. The base and limits are checked.
10166 * @param GCPtrMem The address of the guest memory.
10167 * @param pu256Value Pointer to the value to store.
10168 */
10169IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10170{
10171 /* The lazy approach for now... */
10172 if (GCPtrMem & 31)
10173 return iemRaiseGeneralProtectionFault0(pVCpu);
10174
10175 PRTUINT256U pu256Dst;
10176 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10177 if (rc == VINF_SUCCESS)
10178 {
10179 pu256Dst->au64[0] = pu256Value->au64[0];
10180 pu256Dst->au64[1] = pu256Value->au64[1];
10181 pu256Dst->au64[2] = pu256Value->au64[2];
10182 pu256Dst->au64[3] = pu256Value->au64[3];
10183 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10184 }
10185 return rc;
10186}
10187
10188
10189#ifdef IEM_WITH_SETJMP
10190/**
10191 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10192 *
10194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10195 * @param iSegReg The index of the segment register to use for
10196 * this access. The base and limits are checked.
10197 * @param GCPtrMem The address of the guest memory.
10198 * @param pu256Value Pointer to the value to store.
10199 */
10200DECL_NO_INLINE(IEM_STATIC, void)
10201iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10202{
10203 /* The lazy approach for now... */
10204 if ((GCPtrMem & 31) == 0)
10205 {
10206 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10207 pu256Dst->au64[0] = pu256Value->au64[0];
10208 pu256Dst->au64[1] = pu256Value->au64[1];
10209 pu256Dst->au64[2] = pu256Value->au64[2];
10210 pu256Dst->au64[3] = pu256Value->au64[3];
10211 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10212 return;
10213 }
10214
10215 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10216 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10217}
10218#endif
10219
10220
10221/**
10222 * Stores a descriptor register (sgdt, sidt).
10223 *
10224 * @returns Strict VBox status code.
10225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10226 * @param cbLimit The limit.
10227 * @param GCPtrBase The base address.
10228 * @param iSegReg The index of the segment register to use for
10229 * this access. The base and limits are checked.
10230 * @param GCPtrMem The address of the guest memory.
10231 */
10232IEM_STATIC VBOXSTRICTRC
10233iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10234{
10235 VBOXSTRICTRC rcStrict;
10236 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
10237 {
10238 Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n"));
10239 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
10240 }
10241
10242 /*
10243 * The SIDT and SGDT instructions actually store the data using two
10244 * independent writes. The instructions do not respond to operand-size prefixes.
10245 */
10246 rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10247 if (rcStrict == VINF_SUCCESS)
10248 {
10249 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10250 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10251 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10252 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10253 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10254 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10255 else
10256 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10257 }
10258 return rcStrict;
10259}
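/*
 * Usage sketch (illustrative only): SGDT/SIDT implementations hand the cached
 * limit and base to the helper above, which performs the two independent
 * writes and applies the 286 top-byte quirk.  The pCtx->gdtr fields and local
 * names are assumptions for the example.
 */
#if 0
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pVCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
#endif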
10260
10261
10262/**
10263 * Pushes a word onto the stack.
10264 *
10265 * @returns Strict VBox status code.
10266 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10267 * @param u16Value The value to push.
10268 */
10269IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10270{
10271 /* Decrement the stack pointer. */
10272 uint64_t uNewRsp;
10273 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10274 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10275
10276 /* Write the word the lazy way. */
10277 uint16_t *pu16Dst;
10278 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10279 if (rc == VINF_SUCCESS)
10280 {
10281 *pu16Dst = u16Value;
10282 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10283 }
10284
10285 /* Commit the new RSP value unless an access handler made trouble. */
10286 if (rc == VINF_SUCCESS)
10287 pCtx->rsp = uNewRsp;
10288
10289 return rc;
10290}
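/*
 * Note on the push helpers (illustrative sketch): the stack pointer is only
 * committed after the mapped write has been committed successfully, so a
 * faulting PUSH leaves RSP untouched.  A PUSH r16 style caller would simply
 * do the following; u16Src is an assumed local for the example.
 */
#if 0
    VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, u16Src);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;            /* #SS/#PF etc.; RSP has not been modified. */
#endif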
10291
10292
10293/**
10294 * Pushes a dword onto the stack.
10295 *
10296 * @returns Strict VBox status code.
10297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10298 * @param u32Value The value to push.
10299 */
10300IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10301{
10302 /* Decrement the stack pointer. */
10303 uint64_t uNewRsp;
10304 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10305 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10306
10307 /* Write the dword the lazy way. */
10308 uint32_t *pu32Dst;
10309 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10310 if (rc == VINF_SUCCESS)
10311 {
10312 *pu32Dst = u32Value;
10313 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10314 }
10315
10316 /* Commit the new RSP value unless an access handler made trouble. */
10317 if (rc == VINF_SUCCESS)
10318 pCtx->rsp = uNewRsp;
10319
10320 return rc;
10321}
10322
10323
10324/**
10325 * Pushes a dword segment register value onto the stack.
10326 *
10327 * @returns Strict VBox status code.
10328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10329 * @param u32Value The value to push.
10330 */
10331IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10332{
10333 /* Decrement the stack pointer. */
10334 uint64_t uNewRsp;
10335 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10336 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10337
10338 VBOXSTRICTRC rc;
10339 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10340 {
10341 /* The recompiler writes a full dword. */
10342 uint32_t *pu32Dst;
10343 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10344 if (rc == VINF_SUCCESS)
10345 {
10346 *pu32Dst = u32Value;
10347 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10348 }
10349 }
10350 else
10351 {
10352 /* The Intel docs talk about zero extending the selector register
10353 value. My actual Intel CPU here might be zero extending the value
10354 but it still only writes the lower word... */
10355 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10356 * happens when crossing an electric page boundary, is the high word checked
10357 * for write accessibility or not? Probably it is. What about segment limits?
10358 * It appears this behavior is also shared with trap error codes.
10359 *
10360 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro.
10361 * Check ancient hardware to pin down when it actually changed. */
10362 uint16_t *pu16Dst;
10363 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10364 if (rc == VINF_SUCCESS)
10365 {
10366 *pu16Dst = (uint16_t)u32Value;
10367 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10368 }
10369 }
10370
10371 /* Commit the new RSP value unless an access handler made trouble. */
10372 if (rc == VINF_SUCCESS)
10373 pCtx->rsp = uNewRsp;
10374
10375 return rc;
10376}
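/*
 * Usage sketch (illustrative only): a PUSH of a segment register in 32-bit
 * code goes through the helper above so that, outside the REM verification
 * mode, only the low word of the 4-byte stack slot is actually written,
 * matching observed hardware.  The fs.Sel accessor is an assumption here.
 */
#if 0
    VBOXSTRICTRC rcStrict = iemMemStackPushU32SReg(pVCpu, IEM_GET_CTX(pVCpu)->fs.Sel);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
#endif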
10377
10378
10379/**
10380 * Pushes a qword onto the stack.
10381 *
10382 * @returns Strict VBox status code.
10383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10384 * @param u64Value The value to push.
10385 */
10386IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10387{
10388 /* Decrement the stack pointer. */
10389 uint64_t uNewRsp;
10390 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10391 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10392
10393 /* Write the qword the lazy way. */
10394 uint64_t *pu64Dst;
10395 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10396 if (rc == VINF_SUCCESS)
10397 {
10398 *pu64Dst = u64Value;
10399 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10400 }
10401
10402 /* Commit the new RSP value unless an access handler made trouble. */
10403 if (rc == VINF_SUCCESS)
10404 pCtx->rsp = uNewRsp;
10405
10406 return rc;
10407}
10408
10409
10410/**
10411 * Pops a word from the stack.
10412 *
10413 * @returns Strict VBox status code.
10414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10415 * @param pu16Value Where to store the popped value.
10416 */
10417IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10418{
10419 /* Increment the stack pointer. */
10420 uint64_t uNewRsp;
10421 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10422 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10423
10424 /* Read the word the lazy way. */
10425 uint16_t const *pu16Src;
10426 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10427 if (rc == VINF_SUCCESS)
10428 {
10429 *pu16Value = *pu16Src;
10430 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10431
10432 /* Commit the new RSP value. */
10433 if (rc == VINF_SUCCESS)
10434 pCtx->rsp = uNewRsp;
10435 }
10436
10437 return rc;
10438}
10439
10440
10441/**
10442 * Pops a dword from the stack.
10443 *
10444 * @returns Strict VBox status code.
10445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10446 * @param pu32Value Where to store the popped value.
10447 */
10448IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10449{
10450 /* Increment the stack pointer. */
10451 uint64_t uNewRsp;
10452 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10453 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10454
10455 /* Read the dword the lazy way. */
10456 uint32_t const *pu32Src;
10457 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10458 if (rc == VINF_SUCCESS)
10459 {
10460 *pu32Value = *pu32Src;
10461 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10462
10463 /* Commit the new RSP value. */
10464 if (rc == VINF_SUCCESS)
10465 pCtx->rsp = uNewRsp;
10466 }
10467
10468 return rc;
10469}
10470
10471
10472/**
10473 * Pops a qword from the stack.
10474 *
10475 * @returns Strict VBox status code.
10476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10477 * @param pu64Value Where to store the popped value.
10478 */
10479IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10480{
10481 /* Increment the stack pointer. */
10482 uint64_t uNewRsp;
10483 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10484 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10485
10486 /* Read the qword the lazy way. */
10487 uint64_t const *pu64Src;
10488 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10489 if (rc == VINF_SUCCESS)
10490 {
10491 *pu64Value = *pu64Src;
10492 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10493
10494 /* Commit the new RSP value. */
10495 if (rc == VINF_SUCCESS)
10496 pCtx->rsp = uNewRsp;
10497 }
10498
10499 return rc;
10500}
10501
10502
10503/**
10504 * Pushes a word onto the stack, using a temporary stack pointer.
10505 *
10506 * @returns Strict VBox status code.
10507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10508 * @param u16Value The value to push.
10509 * @param pTmpRsp Pointer to the temporary stack pointer.
10510 */
10511IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10512{
10513 /* Decrement the stack pointer. */
10514 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10515 RTUINT64U NewRsp = *pTmpRsp;
10516 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10517
10518 /* Write the word the lazy way. */
10519 uint16_t *pu16Dst;
10520 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10521 if (rc == VINF_SUCCESS)
10522 {
10523 *pu16Dst = u16Value;
10524 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10525 }
10526
10527 /* Commit the new RSP value unless an access handler made trouble. */
10528 if (rc == VINF_SUCCESS)
10529 *pTmpRsp = NewRsp;
10530
10531 return rc;
10532}
10533
10534
10535/**
10536 * Pushes a dword onto the stack, using a temporary stack pointer.
10537 *
10538 * @returns Strict VBox status code.
10539 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10540 * @param u32Value The value to push.
10541 * @param pTmpRsp Pointer to the temporary stack pointer.
10542 */
10543IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10544{
10545 /* Decrement the stack pointer. */
10546 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10547 RTUINT64U NewRsp = *pTmpRsp;
10548 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10549
10550 /* Write the dword the lazy way. */
10551 uint32_t *pu32Dst;
10552 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10553 if (rc == VINF_SUCCESS)
10554 {
10555 *pu32Dst = u32Value;
10556 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10557 }
10558
10559 /* Commit the new RSP value unless an access handler made trouble. */
10560 if (rc == VINF_SUCCESS)
10561 *pTmpRsp = NewRsp;
10562
10563 return rc;
10564}
10565
10566
10567/**
10568 * Pushes a qword onto the stack, using a temporary stack pointer.
10569 *
10570 * @returns Strict VBox status code.
10571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10572 * @param u64Value The value to push.
10573 * @param pTmpRsp Pointer to the temporary stack pointer.
10574 */
10575IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10576{
10577 /* Decrement the stack pointer. */
10578 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10579 RTUINT64U NewRsp = *pTmpRsp;
10580 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10581
10582 /* Write the qword the lazy way. */
10583 uint64_t *pu64Dst;
10584 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10585 if (rc == VINF_SUCCESS)
10586 {
10587 *pu64Dst = u64Value;
10588 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10589 }
10590
10591 /* Commit the new RSP value unless an access handler made trouble. */
10592 if (rc == VINF_SUCCESS)
10593 *pTmpRsp = NewRsp;
10594
10595 return rc;
10596}
10597
10598
10599/**
10600 * Pops a word from the stack, using a temporary stack pointer.
10601 *
10602 * @returns Strict VBox status code.
10603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10604 * @param pu16Value Where to store the popped value.
10605 * @param pTmpRsp Pointer to the temporary stack pointer.
10606 */
10607IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10608{
10609 /* Increment the stack pointer. */
10610 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10611 RTUINT64U NewRsp = *pTmpRsp;
10612 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10613
10614 /* Read the word the lazy way. */
10615 uint16_t const *pu16Src;
10616 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10617 if (rc == VINF_SUCCESS)
10618 {
10619 *pu16Value = *pu16Src;
10620 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10621
10622 /* Commit the new RSP value. */
10623 if (rc == VINF_SUCCESS)
10624 *pTmpRsp = NewRsp;
10625 }
10626
10627 return rc;
10628}
10629
10630
10631/**
10632 * Pops a dword from the stack, using a temporary stack pointer.
10633 *
10634 * @returns Strict VBox status code.
10635 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10636 * @param pu32Value Where to store the popped value.
10637 * @param pTmpRsp Pointer to the temporary stack pointer.
10638 */
10639IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10640{
10641 /* Increment the stack pointer. */
10642 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10643 RTUINT64U NewRsp = *pTmpRsp;
10644 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10645
10646 /* Read the dword the lazy way. */
10647 uint32_t const *pu32Src;
10648 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10649 if (rc == VINF_SUCCESS)
10650 {
10651 *pu32Value = *pu32Src;
10652 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10653
10654 /* Commit the new RSP value. */
10655 if (rc == VINF_SUCCESS)
10656 *pTmpRsp = NewRsp;
10657 }
10658
10659 return rc;
10660}
10661
10662
10663/**
10664 * Pops a qword from the stack, using a temporary stack pointer.
10665 *
10666 * @returns Strict VBox status code.
10667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10668 * @param pu64Value Where to store the popped value.
10669 * @param pTmpRsp Pointer to the temporary stack pointer.
10670 */
10671IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10672{
10673 /* Increment the stack pointer. */
10674 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10675 RTUINT64U NewRsp = *pTmpRsp;
10676 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10677
10678 /* Read the qword the lazy way. */
10679 uint64_t const *pu64Src;
10680 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10681 if (rcStrict == VINF_SUCCESS)
10682 {
10683 *pu64Value = *pu64Src;
10684 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10685
10686 /* Commit the new RSP value. */
10687 if (rcStrict == VINF_SUCCESS)
10688 *pTmpRsp = NewRsp;
10689 }
10690
10691 return rcStrict;
10692}
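/*
 * Usage sketch (illustrative only): the *Ex variants operate on a caller
 * provided RTUINT64U so that multi-value sequences (e.g. a far return pulling
 * RIP and CS) can be unwound as a unit; CPUMCTX::rsp is only updated once
 * everything has succeeded.  The variable names below are assumptions.
 */
#if 0
    RTUINT64U    TmpRsp;
    TmpRsp.u = IEM_GET_CTX(pVCpu)->rsp;
    uint64_t     uNewRip = 0;
    uint16_t     uNewCs  = 0;
    VBOXSTRICTRC rcStrict = iemMemStackPopU64Ex(pVCpu, &uNewRip, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPopU16Ex(pVCpu, &uNewCs, &TmpRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                    /* guest RSP still holds the original value */
    IEM_GET_CTX(pVCpu)->rsp = TmpRsp.u;     /* commit only after both pops worked */
#endif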
10693
10694
10695/**
10696 * Begin a special stack push (used by interrupt, exceptions and such).
10697 *
10698 * This will raise \#SS or \#PF if appropriate.
10699 *
10700 * @returns Strict VBox status code.
10701 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10702 * @param cbMem The number of bytes to push onto the stack.
10703 * @param ppvMem Where to return the pointer to the stack memory.
10704 * As with the other memory functions this could be
10705 * direct access or bounce buffered access, so
10706 * don't commit any register state until the commit call
10707 * succeeds.
10708 * @param puNewRsp Where to return the new RSP value. This must be
10709 * passed unchanged to
10710 * iemMemStackPushCommitSpecial().
10711 */
10712IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10713{
10714 Assert(cbMem < UINT8_MAX);
10715 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10716 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10717 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10718}
10719
10720
10721/**
10722 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10723 *
10724 * This will update the rSP.
10725 *
10726 * @returns Strict VBox status code.
10727 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10728 * @param pvMem The pointer returned by
10729 * iemMemStackPushBeginSpecial().
10730 * @param uNewRsp The new RSP value returned by
10731 * iemMemStackPushBeginSpecial().
10732 */
10733IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10734{
10735 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10736 if (rcStrict == VINF_SUCCESS)
10737 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10738 return rcStrict;
10739}
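/*
 * Usage sketch (illustrative only): exception/interrupt delivery code maps the
 * whole stack frame in one go, fills it in, and only then commits both the
 * memory and the new RSP.  The 3-dword frame layout and the uRetEip/uCsSel/
 * uEflags locals are assumptions made for the example.
 */
#if 0
    void     *pvStackFrame;
    uint64_t  uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 3 * sizeof(uint32_t), &pvStackFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint32_t *pu32Frame = (uint32_t *)pvStackFrame;
    pu32Frame[2] = uEflags;
    pu32Frame[1] = uCsSel;
    pu32Frame[0] = uRetEip;
    rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackFrame, uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
#endif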
10740
10741
10742/**
10743 * Begin a special stack pop (used by iret, retf and such).
10744 *
10745 * This will raise \#SS or \#PF if appropriate.
10746 *
10747 * @returns Strict VBox status code.
10748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10749 * @param cbMem The number of bytes to pop from the stack.
10750 * @param ppvMem Where to return the pointer to the stack memory.
10751 * @param puNewRsp Where to return the new RSP value. This must be
10752 * assigned to CPUMCTX::rsp manually some time
10753 * after iemMemStackPopDoneSpecial() has been
10754 * called.
10755 */
10756IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10757{
10758 Assert(cbMem < UINT8_MAX);
10759 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10760 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10761 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10762}
10763
10764
10765/**
10766 * Continue a special stack pop (used by iret and retf).
10767 *
10768 * This will raise \#SS or \#PF if appropriate.
10769 *
10770 * @returns Strict VBox status code.
10771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10772 * @param cbMem The number of bytes to pop from the stack.
10773 * @param ppvMem Where to return the pointer to the stack memory.
10774 * @param puNewRsp Where to return the new RSP value. This must be
10775 * assigned to CPUMCTX::rsp manually some time
10776 * after iemMemStackPopDoneSpecial() has been
10777 * called.
10778 */
10779IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10780{
10781 Assert(cbMem < UINT8_MAX);
10782 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10783 RTUINT64U NewRsp;
10784 NewRsp.u = *puNewRsp;
10785 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10786 *puNewRsp = NewRsp.u;
10787 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10788}
10789
10790
10791/**
10792 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10793 * iemMemStackPopContinueSpecial).
10794 *
10795 * The caller will manually commit the rSP.
10796 *
10797 * @returns Strict VBox status code.
10798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10799 * @param pvMem The pointer returned by
10800 * iemMemStackPopBeginSpecial() or
10801 * iemMemStackPopContinueSpecial().
10802 */
10803IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10804{
10805 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10806}
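/*
 * Usage sketch (illustrative only): IRET/RETF style code maps the values it
 * needs, copies them out, releases the mapping with the Done routine, and
 * assigns CPUMCTX::rsp itself once the whole operation is known to succeed.
 * The 16-bit two-entry frame layout is an assumption for the example.
 */
#if 0
    uint64_t        uNewRsp;
    uint16_t const *pu16Frame;
    VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 2 * sizeof(uint16_t),
                                                          (void const **)&pu16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint16_t const uRetIp = pu16Frame[0];
    uint16_t const uRetCs = pu16Frame[1];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu16Frame);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* ... validate and load uRetCs, then: IEM_GET_CTX(pVCpu)->rsp = uNewRsp; */
#endif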
10807
10808
10809/**
10810 * Fetches a system table byte.
10811 *
10812 * @returns Strict VBox status code.
10813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10814 * @param pbDst Where to return the byte.
10815 * @param iSegReg The index of the segment register to use for
10816 * this access. The base and limits are checked.
10817 * @param GCPtrMem The address of the guest memory.
10818 */
10819IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10820{
10821 /* The lazy approach for now... */
10822 uint8_t const *pbSrc;
10823 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10824 if (rc == VINF_SUCCESS)
10825 {
10826 *pbDst = *pbSrc;
10827 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10828 }
10829 return rc;
10830}
10831
10832
10833/**
10834 * Fetches a system table word.
10835 *
10836 * @returns Strict VBox status code.
10837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10838 * @param pu16Dst Where to return the word.
10839 * @param iSegReg The index of the segment register to use for
10840 * this access. The base and limits are checked.
10841 * @param GCPtrMem The address of the guest memory.
10842 */
10843IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10844{
10845 /* The lazy approach for now... */
10846 uint16_t const *pu16Src;
10847 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10848 if (rc == VINF_SUCCESS)
10849 {
10850 *pu16Dst = *pu16Src;
10851 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10852 }
10853 return rc;
10854}
10855
10856
10857/**
10858 * Fetches a system table dword.
10859 *
10860 * @returns Strict VBox status code.
10861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10862 * @param pu32Dst Where to return the dword.
10863 * @param iSegReg The index of the segment register to use for
10864 * this access. The base and limits are checked.
10865 * @param GCPtrMem The address of the guest memory.
10866 */
10867IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10868{
10869 /* The lazy approach for now... */
10870 uint32_t const *pu32Src;
10871 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10872 if (rc == VINF_SUCCESS)
10873 {
10874 *pu32Dst = *pu32Src;
10875 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10876 }
10877 return rc;
10878}
10879
10880
10881/**
10882 * Fetches a system table qword.
10883 *
10884 * @returns Strict VBox status code.
10885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10886 * @param pu64Dst Where to return the qword.
10887 * @param iSegReg The index of the segment register to use for
10888 * this access. The base and limits are checked.
10889 * @param GCPtrMem The address of the guest memory.
10890 */
10891IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10892{
10893 /* The lazy approach for now... */
10894 uint64_t const *pu64Src;
10895 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10896 if (rc == VINF_SUCCESS)
10897 {
10898 *pu64Dst = *pu64Src;
10899 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10900 }
10901 return rc;
10902}
10903
10904
10905/**
10906 * Fetches a descriptor table entry with caller specified error code.
10907 *
10908 * @returns Strict VBox status code.
10909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10910 * @param pDesc Where to return the descriptor table entry.
10911 * @param uSel The selector which table entry to fetch.
10912 * @param uXcpt The exception to raise on table lookup error.
10913 * @param uErrorCode The error code associated with the exception.
10914 */
10915IEM_STATIC VBOXSTRICTRC
10916iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10917{
10918 AssertPtr(pDesc);
10919 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10920
10921 /** @todo did the 286 require all 8 bytes to be accessible? */
10922 /*
10923 * Get the selector table base and check bounds.
10924 */
10925 RTGCPTR GCPtrBase;
10926 if (uSel & X86_SEL_LDT)
10927 {
10928 if ( !pCtx->ldtr.Attr.n.u1Present
10929 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10930 {
10931 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10932 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10933 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10934 uErrorCode, 0);
10935 }
10936
10937 Assert(pCtx->ldtr.Attr.n.u1Present);
10938 GCPtrBase = pCtx->ldtr.u64Base;
10939 }
10940 else
10941 {
10942 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10943 {
10944 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10945 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10946 uErrorCode, 0);
10947 }
10948 GCPtrBase = pCtx->gdtr.pGdt;
10949 }
10950
10951 /*
10952 * Read the legacy descriptor and maybe the long mode extensions if
10953 * required.
10954 */
10955 VBOXSTRICTRC rcStrict;
10956 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10957 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10958 else
10959 {
10960 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10961 if (rcStrict == VINF_SUCCESS)
10962 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10963 if (rcStrict == VINF_SUCCESS)
10964 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10965 if (rcStrict == VINF_SUCCESS)
10966 pDesc->Legacy.au16[3] = 0;
10967 else
10968 return rcStrict;
10969 }
10970
10971 if (rcStrict == VINF_SUCCESS)
10972 {
10973 if ( !IEM_IS_LONG_MODE(pVCpu)
10974 || pDesc->Legacy.Gen.u1DescType)
10975 pDesc->Long.au64[1] = 0;
10976 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10977 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10978 else
10979 {
10980 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10981 /** @todo is this the right exception? */
10982 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10983 }
10984 }
10985 return rcStrict;
10986}
10987
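/*
 * Editor's illustrative sketch (not part of the original source): how a
 * selector value is decomposed by the fetch above, using the X86_SEL_*
 * constants this file already relies on.  The selector value 0x002f and the
 * local variable names are purely hypothetical.
 *
 *     uint16_t const uSel    = 0x002f;
 *     uint16_t const uRpl    = uSel & X86_SEL_RPL;           // requested privilege level: 3
 *     bool     const fLdt    = RT_BOOL(uSel & X86_SEL_LDT);  // table indicator: 1 = LDT, 0 = GDT
 *     uint16_t const offDesc = uSel & X86_SEL_MASK;          // byte offset of the 8-byte entry: 0x28
 *
 * iemMemFetchSelDescWithErr reads 8 bytes at the table base plus offDesc and,
 * in long mode, a second qword for system descriptors (which are 16 bytes).
 */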
10988
10989/**
10990 * Fetches a descriptor table entry.
10991 *
10992 * @returns Strict VBox status code.
10993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10994 * @param pDesc Where to return the descriptor table entry.
10995 * @param uSel The selector for the table entry to fetch.
10996 * @param uXcpt The exception to raise on table lookup error.
10997 */
10998IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10999{
11000 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11001}
11002
11003
11004/**
11005 * Fakes a long mode stack selector for SS = 0.
11006 *
11007 * @param pDescSs Where to return the fake stack descriptor.
11008 * @param uDpl The DPL we want.
11009 */
11010IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11011{
11012 pDescSs->Long.au64[0] = 0;
11013 pDescSs->Long.au64[1] = 0;
11014 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11015 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11016 pDescSs->Long.Gen.u2Dpl = uDpl;
11017 pDescSs->Long.Gen.u1Present = 1;
11018 pDescSs->Long.Gen.u1Long = 1;
11019}
11020
11021
11022/**
11023 * Marks the selector descriptor as accessed (only non-system descriptors).
11024 *
11025 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11026 * will therefore skip the limit checks.
11027 *
11028 * @returns Strict VBox status code.
11029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11030 * @param uSel The selector.
11031 */
11032IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11033{
11034 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11035
11036 /*
11037 * Get the selector table base and calculate the entry address.
11038 */
11039 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11040 ? pCtx->ldtr.u64Base
11041 : pCtx->gdtr.pGdt;
11042 GCPtr += uSel & X86_SEL_MASK;
11043
11044 /*
11045 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11046 * ugly stuff to avoid this. This also makes sure the access is atomic and
11047 * more or less removes any question about 8-bit vs 32-bit accesses.
11048 */
11049 VBOXSTRICTRC rcStrict;
11050 uint32_t volatile *pu32;
11051 if ((GCPtr & 3) == 0)
11052 {
11053 /* The normal case, map the 32 bits around the accessed bit (40). */
11054 GCPtr += 2 + 2;
11055 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11056 if (rcStrict != VINF_SUCCESS)
11057 return rcStrict;
11058 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11059 }
11060 else
11061 {
11062 /* The misaligned GDT/LDT case, map the whole thing. */
11063 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11064 if (rcStrict != VINF_SUCCESS)
11065 return rcStrict;
11066 switch ((uintptr_t)pu32 & 3)
11067 {
11068 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11069 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11070 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11071 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11072 }
11073 }
11074
11075 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11076}
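/*
 * Editor's worked example (not part of the original source): the accessed
 * flag is bit 40 of the 8-byte descriptor, i.e. bit 0 of the type field in
 * byte 5.  On the aligned path above the second dword (offset +4) is mapped,
 * so the flag becomes bit 40 - 32 = 8 of that dword.  On the misaligned path
 * the whole descriptor is mapped and the pointer is re-based so the atomic
 * operation targets an aligned address while still hitting bit 40 overall;
 * e.g. for ((uintptr_t)pu32 & 3) == 1 the code uses (uint8_t *)pu32 + 3 with
 * bit index 16, and 3*8 + 16 = 40.
 */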
11077
11078/** @} */
11079
11080
11081/*
11082 * Include the C/C++ implementation of instructions.
11083 */
11084#include "IEMAllCImpl.cpp.h"
11085
11086
11087
11088/** @name "Microcode" macros.
11089 *
11090 * The idea is that we should be able to use the same code to interpret
11091 * instructions as well as to recompile them. Thus this obfuscation.
11092 *
11093 * @{
11094 */
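/*
 * Editor's illustrative sketch (not part of the original source): the shape
 * of an instruction body built from these microcode macros.  The register
 * choice and the 16-bit ADD are hypothetical; real instances live in the
 * instruction decoder templates included further down.
 *
 *     IEM_MC_BEGIN(3, 0);
 *     IEM_MC_ARG(uint16_t *, pu16Dst,  0);
 *     IEM_MC_ARG(uint16_t,   u16Src,   1);
 *     IEM_MC_ARG(uint32_t *, pEFlags,  2);
 *     IEM_MC_FETCH_GREG_U16(u16Src, X86_GREG_xCX);
 *     IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
 *     IEM_MC_REF_EFLAGS(pEFlags);
 *     IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */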
11095#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11096#define IEM_MC_END() }
11097#define IEM_MC_PAUSE() do {} while (0)
11098#define IEM_MC_CONTINUE() do {} while (0)
11099
11100/** Internal macro. */
11101#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11102 do \
11103 { \
11104 VBOXSTRICTRC rcStrict2 = a_Expr; \
11105 if (rcStrict2 != VINF_SUCCESS) \
11106 return rcStrict2; \
11107 } while (0)
11108
11109
11110#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11111#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11112#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11113#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11114#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11115#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11116#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11117#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11118#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11119 do { \
11120 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11121 return iemRaiseDeviceNotAvailable(pVCpu); \
11122 } while (0)
11123#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11124 do { \
11125 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11126 return iemRaiseDeviceNotAvailable(pVCpu); \
11127 } while (0)
11128#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11129 do { \
11130 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11131 return iemRaiseMathFault(pVCpu); \
11132 } while (0)
11133#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11134 do { \
11135 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11136 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11137 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11138 return iemRaiseUndefinedOpcode(pVCpu); \
11139 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11140 return iemRaiseDeviceNotAvailable(pVCpu); \
11141 } while (0)
11142#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11143 do { \
11144 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11145 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11146 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11147 return iemRaiseUndefinedOpcode(pVCpu); \
11148 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11149 return iemRaiseDeviceNotAvailable(pVCpu); \
11150 } while (0)
11151#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11152 do { \
11153 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11154 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11155 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11156 return iemRaiseUndefinedOpcode(pVCpu); \
11157 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11158 return iemRaiseDeviceNotAvailable(pVCpu); \
11159 } while (0)
11160#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11161 do { \
11162 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11163 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11164 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11165 return iemRaiseUndefinedOpcode(pVCpu); \
11166 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11167 return iemRaiseDeviceNotAvailable(pVCpu); \
11168 } while (0)
11169#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11170 do { \
11171 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11172 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11173 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11174 return iemRaiseUndefinedOpcode(pVCpu); \
11175 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11176 return iemRaiseDeviceNotAvailable(pVCpu); \
11177 } while (0)
11178#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11179 do { \
11180 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11181 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11182 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11183 return iemRaiseUndefinedOpcode(pVCpu); \
11184 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11185 return iemRaiseDeviceNotAvailable(pVCpu); \
11186 } while (0)
11187#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11188 do { \
11189 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11190 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11191 return iemRaiseUndefinedOpcode(pVCpu); \
11192 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11193 return iemRaiseDeviceNotAvailable(pVCpu); \
11194 } while (0)
11195#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11196 do { \
11197 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11198 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11199 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11200 return iemRaiseUndefinedOpcode(pVCpu); \
11201 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11202 return iemRaiseDeviceNotAvailable(pVCpu); \
11203 } while (0)
11204#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11205 do { \
11206 if (pVCpu->iem.s.uCpl != 0) \
11207 return iemRaiseGeneralProtectionFault0(pVCpu); \
11208 } while (0)
11209#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11210 do { \
11211 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11212 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11213 } while (0)
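/*
 * Editor's worked example (not part of the original source): the alignment
 * check above relies on a_cbAlign being a power of two, so (a_cbAlign - 1)
 * masks the low address bits.  For a 16-byte aligned SSE operand:
 *
 *     0x00001230 & 0x0f == 0  ->  aligned, no fault
 *     0x00001238 & 0x0f == 8  ->  misaligned, #GP(0) is raised
 */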
11214
11215
11216#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11217#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11218#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11219#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11220#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11221#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11222#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11223 uint32_t a_Name; \
11224 uint32_t *a_pName = &a_Name
11225#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11226 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
11227
11228#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11229#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11230
11231#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11232#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11233#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11234#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11235#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11236#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11237#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11238#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11239#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11240#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11241#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11242#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11243#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11244#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11245#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11246#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11247#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11248#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11249#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11250#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11251#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11252#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11253#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11254#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11255#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11256#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11257#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11258#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11259#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11260/** @note Not for IOPL or IF testing or modification. */
11261#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11262#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11263#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11264#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11265
11266#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11267#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11268#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11269#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11270#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11271#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11272#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11273#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11274#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11275#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11276#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11277 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11278
11279#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11280#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11281/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11282 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11283#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11284#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11285/** @note Not for IOPL or IF testing or modification. */
11286#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11287
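/*
 * Editor's note (not part of the original source): the "clear high bits"
 * handling mirrors the x86-64 rule that a 32-bit GPR write zero-extends into
 * bits 63:32, while 8-bit and 16-bit writes leave the rest of the register
 * untouched.  A minimal sketch with hypothetical values:
 *
 *     // assume RAX = 0xffffffffffffffff
 *     IEM_MC_STORE_GREG_U16(X86_GREG_xAX, 0x1234);   // RAX = 0xffffffffffff1234
 *     IEM_MC_STORE_GREG_U32(X86_GREG_xAX, 0x1234);   // RAX = 0x0000000000001234
 */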
11288#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11289#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11290#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11291 do { \
11292 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11293 *pu32Reg += (a_u32Value); \
11294 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11295 } while (0)
11296#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11297
11298#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11299#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11300#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11301 do { \
11302 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11303 *pu32Reg -= (a_u32Value); \
11304 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11305 } while (0)
11306#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11307#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11308
11309#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11310#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11311#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11312#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11313#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11314#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11315#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11316
11317#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11318#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11319#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11320#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11321
11322#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11323#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11324#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11325
11326#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11327#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11328#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11329
11330#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11331#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11332#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11333
11334#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11335#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11336#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11337
11338#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11339
11340#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11341
11342#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11343#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11344#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11345 do { \
11346 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11347 *pu32Reg &= (a_u32Value); \
11348 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11349 } while (0)
11350#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11351
11352#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11353#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11354#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11355 do { \
11356 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11357 *pu32Reg |= (a_u32Value); \
11358 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11359 } while (0)
11360#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11361
11362
11363/** @note Not for IOPL or IF modification. */
11364#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11365/** @note Not for IOPL or IF modification. */
11366#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11367/** @note Not for IOPL or IF modification. */
11368#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11369
11370#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11371
11372/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11373#define IEM_MC_FPU_TO_MMX_MODE() do { \
11374 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11375 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11376 } while (0)
11377
11378/** Switches the FPU state from MMX mode (FTW=0xffff). */
11379#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11380 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0; \
11381 } while (0)
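/*
 * Editor's note (not part of the original source): the FTW values used by
 * the two macros above are in the abridged FXSAVE form (one valid bit per
 * register).  Architecturally, entering MMX mode tags all registers valid
 * (full tag word 0x0000, abridged 0xff) and leaving it tags them empty
 * (full tag word 0xffff, abridged 0x00), which is why the doc comments and
 * the code appear to use different constants.
 */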
11382
11383#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11384 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11385#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11386 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11387#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11388 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11389 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11390 } while (0)
11391#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11392 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11393 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11394 } while (0)
11395#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11396 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11397#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11398 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11399#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11400 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11401
11402#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11403 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11404 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11405 } while (0)
11406#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11407 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11408#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11409 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11410#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11411 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11412#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11413 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11414 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11415 } while (0)
11416#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11417 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11418#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11419 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11420 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11421 } while (0)
11422#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11423 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11424#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11425 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11426 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11427 } while (0)
11428#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11429 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11430#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11431 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11432#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11433 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11434#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11435 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11436#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11437 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11438 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11439 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11440 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11441 } while (0)
11442
11443#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11444 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11445 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11446 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11447 } while (0)
11448#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11449 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11450 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11451 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11452 } while (0)
11453#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11454 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11455 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11456 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11457 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11458 } while (0)
11459#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11460 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11461 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11462 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11463 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11464 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11465 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11466 } while (0)
11467
11468#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11469#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11470 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11471 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11472 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11473 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11474 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11475 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11476 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11477 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11478 } while (0)
11479#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11480 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11481 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11482 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11483 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11484 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11485 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11486 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11487 } while (0)
11488#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11489 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11490 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11491 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11492 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11493 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11494 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11495 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11496 } while (0)
11497#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11498 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11499 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11500 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11501 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11502 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11503 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11504 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11505 } while (0)
11506
11507#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11508 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11509#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11510 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11511#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11512 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11513#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11514 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11515 uintptr_t const iYRegTmp = (a_iYReg); \
11516 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11517 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11518 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11519 } while (0)
11520
11521#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11522 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11523 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11524 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11525 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11526 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11527 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11528 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11529 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11530 } while (0)
11531#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11532 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11533 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11534 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11535 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11536 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11537 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11538 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11539 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11540 } while (0)
11541#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11542 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11543 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11544 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11545 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11546 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11547 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11548 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11549 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11550 } while (0)
11551
11552#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11553 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11554 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11555 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11556 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11557 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11558 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11559 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11560 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11561 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11562 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11563 } while (0)
11564#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11565 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11566 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11567 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11568 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11569 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11570 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11571 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11572 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11573 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11574 } while (0)
11575#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11576 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11577 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11578 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11579 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11580 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11581 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11582 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11583 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11584 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11585 } while (0)
11586#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11587 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11588 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11589 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11590 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11591 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11592 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11593 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11594 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11595 } while (0)
11596
11597#ifndef IEM_WITH_SETJMP
11598# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11599 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11600# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11601 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11602# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11603 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11604#else
11605# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11606 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11607# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11608 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11609# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11610 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11611#endif
11612
11613#ifndef IEM_WITH_SETJMP
11614# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11615 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11616# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11617 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11618# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11619 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11620#else
11621# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11622 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11623# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11624 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11625# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11626 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11627#endif
11628
11629#ifndef IEM_WITH_SETJMP
11630# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11631 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11632# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11633 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11634# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11635 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11636#else
11637# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11638 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11639# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11640 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11641# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11642 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11643#endif
11644
11645#ifdef SOME_UNUSED_FUNCTION
11646# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11647 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11648#endif
11649
11650#ifndef IEM_WITH_SETJMP
11651# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11652 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11653# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11654 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11655# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11656 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11657# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11658 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11659#else
11660# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11661 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11662# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11663 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11664# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11665 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11666# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11667 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11668#endif
11669
11670#ifndef IEM_WITH_SETJMP
11671# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11672 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11673# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11674 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11675# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11676 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11677#else
11678# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11679 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11680# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11681 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11682# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11683 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11684#endif
11685
11686#ifndef IEM_WITH_SETJMP
11687# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11688 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11689# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11690 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11691#else
11692# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11693 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11694# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11695 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11696#endif
11697
11698#ifndef IEM_WITH_SETJMP
11699# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11700 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11701# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11702 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11703#else
11704# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11705 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11706# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11707 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11708#endif
11709
11710
11711
11712#ifndef IEM_WITH_SETJMP
11713# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11714 do { \
11715 uint8_t u8Tmp; \
11716 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11717 (a_u16Dst) = u8Tmp; \
11718 } while (0)
11719# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11720 do { \
11721 uint8_t u8Tmp; \
11722 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11723 (a_u32Dst) = u8Tmp; \
11724 } while (0)
11725# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11726 do { \
11727 uint8_t u8Tmp; \
11728 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11729 (a_u64Dst) = u8Tmp; \
11730 } while (0)
11731# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11732 do { \
11733 uint16_t u16Tmp; \
11734 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11735 (a_u32Dst) = u16Tmp; \
11736 } while (0)
11737# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11738 do { \
11739 uint16_t u16Tmp; \
11740 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11741 (a_u64Dst) = u16Tmp; \
11742 } while (0)
11743# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11744 do { \
11745 uint32_t u32Tmp; \
11746 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11747 (a_u64Dst) = u32Tmp; \
11748 } while (0)
11749#else /* IEM_WITH_SETJMP */
11750# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11751 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11752# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11753 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11754# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11755 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11756# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11757 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11758# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11759 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11760# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11761 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11762#endif /* IEM_WITH_SETJMP */
11763
11764#ifndef IEM_WITH_SETJMP
11765# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11766 do { \
11767 uint8_t u8Tmp; \
11768 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11769 (a_u16Dst) = (int8_t)u8Tmp; \
11770 } while (0)
11771# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11772 do { \
11773 uint8_t u8Tmp; \
11774 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11775 (a_u32Dst) = (int8_t)u8Tmp; \
11776 } while (0)
11777# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11778 do { \
11779 uint8_t u8Tmp; \
11780 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11781 (a_u64Dst) = (int8_t)u8Tmp; \
11782 } while (0)
11783# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11784 do { \
11785 uint16_t u16Tmp; \
11786 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11787 (a_u32Dst) = (int16_t)u16Tmp; \
11788 } while (0)
11789# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11790 do { \
11791 uint16_t u16Tmp; \
11792 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11793 (a_u64Dst) = (int16_t)u16Tmp; \
11794 } while (0)
11795# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11796 do { \
11797 uint32_t u32Tmp; \
11798 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11799 (a_u64Dst) = (int32_t)u32Tmp; \
11800 } while (0)
11801#else /* IEM_WITH_SETJMP */
11802# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11803 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11804# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11805 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11806# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11807 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11808# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11809 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11810# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11811 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11812# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11813 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11814#endif /* IEM_WITH_SETJMP */
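/*
 * Editor's worked example (not part of the original source): the _ZX_
 * variants zero-extend while the _SX_ variants sign-extend through the
 * intermediate (int8_t)/(int16_t)/(int32_t) casts above.  For a byte value
 * of 0x80 read from guest memory:
 *
 *     IEM_MC_FETCH_MEM_U8_ZX_U32(u32Dst, iSeg, GCPtrMem)  ->  u32Dst = 0x00000080
 *     IEM_MC_FETCH_MEM_U8_SX_U32(u32Dst, iSeg, GCPtrMem)  ->  u32Dst = 0xffffff80
 */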
11815
11816#ifndef IEM_WITH_SETJMP
11817# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11818 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11819# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11820 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11821# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11822 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11823# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11824 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11825#else
11826# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11827 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11828# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11829 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11830# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11831 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11832# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11833 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11834#endif
11835
11836#ifndef IEM_WITH_SETJMP
11837# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11838 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11839# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11840 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11841# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11842 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11843# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11844 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11845#else
11846# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11847 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11848# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11849 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11850# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11851 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11852# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11853 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11854#endif
11855
11856#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11857#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11858#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11859#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11860#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11861#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11862#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11863 do { \
11864 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11865 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11866 } while (0)
11867
11868#ifndef IEM_WITH_SETJMP
11869# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11870 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11871# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11872 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11873#else
11874# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11875 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11876# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11877 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11878#endif
11879
11880#ifndef IEM_WITH_SETJMP
11881# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11882 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11883# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11884 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11885#else
11886# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11887 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11888# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11889 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11890#endif
11891
11892
11893#define IEM_MC_PUSH_U16(a_u16Value) \
11894 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11895#define IEM_MC_PUSH_U32(a_u32Value) \
11896 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11897#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11898 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11899#define IEM_MC_PUSH_U64(a_u64Value) \
11900 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11901
11902#define IEM_MC_POP_U16(a_pu16Value) \
11903 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11904#define IEM_MC_POP_U32(a_pu32Value) \
11905 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11906#define IEM_MC_POP_U64(a_pu64Value) \
11907 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11908
11909/** Maps guest memory for direct or bounce buffered access.
11910 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11911 * @remarks May return.
11912 */
11913#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11914 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11915
11916/** Maps guest memory for direct or bounce buffered access.
11917 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11918 * @remarks May return.
11919 */
11920#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11921 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11922
11923/** Commits the memory and unmaps the guest memory.
11924 * @remarks May return.
11925 */
11926#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11927 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
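/*
 * Editor's illustrative sketch (not part of the original source): the usual
 * pairing of the map and commit macros for a read-modify-write memory
 * operand inside an IEM_MC block.  The variable names are hypothetical.
 *
 *     IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 *     IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *     ... operate on *pu16Dst ...
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 */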
11928
11929/** Commits the memory and unmaps the guest memory, unless the FPU status word
11930 * (@a a_u16FSW) and the FPU control word indicate a pending, unmasked exception
11931 * that would cause FLD not to store.
11932 *
11933 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11934 * store, while \#P will not.
11935 *
11936 * @remarks May in theory return - for now.
11937 */
11938#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11939 do { \
11940 if ( !(a_u16FSW & X86_FSW_ES) \
11941 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11942 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11943 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11944 } while (0)
11945
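/*
 * Worked example of the check above (values picked for illustration): with
 * a_u16FSW = X86_FSW_ES | X86_FSW_IE and FCW.IM clear, the pending invalid
 * operation exception is unmasked, so the commit is skipped and the store is
 * suppressed.  With FCW.IM set, the second clause masks the IE bit away and
 * the memory is committed as for any other store.
 */
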
11946/** Calculate the effective address from R/M. */
11947#ifndef IEM_WITH_SETJMP
11948# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11949 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11950#else
11951# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11952 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11953#endif
11954
11955#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11956#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11957#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11958#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11959#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11960#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11961#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11962
11963/**
11964 * Defers the rest of the instruction emulation to a C implementation routine
11965 * and returns, only taking the standard parameters.
11966 *
11967 * @param a_pfnCImpl The pointer to the C routine.
11968 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11969 */
11970#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11971
11972/**
11973 * Defers the rest of instruction emulation to a C implementation routine and
11974 * returns, taking one argument in addition to the standard ones.
11975 *
11976 * @param a_pfnCImpl The pointer to the C routine.
11977 * @param a0 The argument.
11978 */
11979#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11980
11981/**
11982 * Defers the rest of the instruction emulation to a C implementation routine
11983 * and returns, taking two arguments in addition to the standard ones.
11984 *
11985 * @param a_pfnCImpl The pointer to the C routine.
11986 * @param a0 The first extra argument.
11987 * @param a1 The second extra argument.
11988 */
11989#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11990
11991/**
11992 * Defers the rest of the instruction emulation to a C implementation routine
11993 * and returns, taking three arguments in addition to the standard ones.
11994 *
11995 * @param a_pfnCImpl The pointer to the C routine.
11996 * @param a0 The first extra argument.
11997 * @param a1 The second extra argument.
11998 * @param a2 The third extra argument.
11999 */
12000#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12001
12002/**
12003 * Defers the rest of the instruction emulation to a C implementation routine
12004 * and returns, taking four arguments in addition to the standard ones.
12005 *
12006 * @param a_pfnCImpl The pointer to the C routine.
12007 * @param a0 The first extra argument.
12008 * @param a1 The second extra argument.
12009 * @param a2 The third extra argument.
12010 * @param a3 The fourth extra argument.
12011 */
12012#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12013
12014/**
12015 * Defers the rest of the instruction emulation to a C implementation routine
12016 * and returns, taking five arguments in addition to the standard ones.
12017 *
12018 * @param a_pfnCImpl The pointer to the C routine.
12019 * @param a0 The first extra argument.
12020 * @param a1 The second extra argument.
12021 * @param a2 The third extra argument.
12022 * @param a3 The fourth extra argument.
12023 * @param a4 The fifth extra argument.
12024 */
12025#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12026
12027/**
12028 * Defers the entire instruction emulation to a C implementation routine and
12029 * returns, only taking the standard parameters.
12030 *
12031 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
12032 *
12033 * @param a_pfnCImpl The pointer to the C routine.
12034 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12035 */
12036#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12037
12038/**
12039 * Defers the entire instruction emulation to a C implementation routine and
12040 * returns, taking one argument in addition to the standard ones.
12041 *
12042 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
12043 *
12044 * @param a_pfnCImpl The pointer to the C routine.
12045 * @param a0 The argument.
12046 */
12047#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12048
12049/**
12050 * Defers the entire instruction emulation to a C implementation routine and
12051 * returns, taking two arguments in addition to the standard ones.
12052 *
12053 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
12054 *
12055 * @param a_pfnCImpl The pointer to the C routine.
12056 * @param a0 The first extra argument.
12057 * @param a1 The second extra argument.
12058 */
12059#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12060
12061/**
12062 * Defers the entire instruction emulation to a C implementation routine and
12063 * returns, taking three arguments in addition to the standard ones.
12064 *
12065 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
12066 *
12067 * @param a_pfnCImpl The pointer to the C routine.
12068 * @param a0 The first extra argument.
12069 * @param a1 The second extra argument.
12070 * @param a2 The third extra argument.
12071 */
12072#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12073
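/*
 * Illustrative sketch (hypothetical C workers, not from the opcode tables):
 * IEM_MC_CALL_CIMPL_* is used inside an IEM_MC_BEGIN/IEM_MC_END block once
 * the decoded arguments are set up, whereas IEM_MC_DEFER_TO_CIMPL_* stands
 * alone and hands the whole instruction to C.
 */
#if 0 /* example only */
/* Tail of a microcode block: */
IEM_MC_BEGIN(1, 0);
IEM_MC_ARG_CONST(uint8_t, iEffSeg, /*=*/ pVCpu->iem.s.iEffSeg, 0);
IEM_MC_CALL_CIMPL_1(iemCImpl_hypothetical_worker, iEffSeg);
IEM_MC_END();

/* Whole-instruction deferral, used without IEM_MC_BEGIN/IEM_MC_END: */
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hypothetical_whole_insn);
#endif
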
12074/**
12075 * Calls an FPU assembly implementation taking one visible argument.
12076 *
12077 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12078 * @param a0 The first extra argument.
12079 */
12080#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12081 do { \
12082 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
12083 } while (0)
12084
12085/**
12086 * Calls an FPU assembly implementation taking two visible arguments.
12087 *
12088 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12089 * @param a0 The first extra argument.
12090 * @param a1 The second extra argument.
12091 */
12092#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12093 do { \
12094 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12095 } while (0)
12096
12097/**
12098 * Calls an FPU assembly implementation taking three visible arguments.
12099 *
12100 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12101 * @param a0 The first extra argument.
12102 * @param a1 The second extra argument.
12103 * @param a2 The third extra argument.
12104 */
12105#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12106 do { \
12107 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12108 } while (0)
12109
12110#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12111 do { \
12112 (a_FpuData).FSW = (a_FSW); \
12113 (a_FpuData).r80Result = *(a_pr80Value); \
12114 } while (0)
12115
12116/** Pushes FPU result onto the stack. */
12117#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12118 iemFpuPushResult(pVCpu, &a_FpuData)
12119/** Pushes FPU result onto the stack and sets the FPUDP. */
12120#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12121 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12122
12123/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
12124#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12125 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12126
12127/** Stores FPU result in a stack register. */
12128#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12129 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12130/** Stores FPU result in a stack register and pops the stack. */
12131#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12132 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12133/** Stores FPU result in a stack register and sets the FPUDP. */
12134#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12135 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12136/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12137 * stack. */
12138#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12139 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12140
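/*
 * Illustrative sketch of the usual ST(0)/ST(i) arithmetic pattern; the worker
 * name is hypothetical and the conditional macros are the IEM_MC_IF_* ones
 * defined further down in this file.
 */
#if 0 /* example only */
IEM_MC_BEGIN(3, 1);
IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);
IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
IEM_MC_MAYBE_RAISE_FPU_XCPT();
IEM_MC_PREPARE_FPU_USAGE();
IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1)
    IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_hypothetical_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
    IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
IEM_MC_ELSE()
    IEM_MC_FPU_STACK_UNDERFLOW(0);
IEM_MC_ENDIF();
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif
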
12141/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12142#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12143 iemFpuUpdateOpcodeAndIp(pVCpu)
12144/** Free a stack register (for FFREE and FFREEP). */
12145#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12146 iemFpuStackFree(pVCpu, a_iStReg)
12147/** Increment the FPU stack pointer. */
12148#define IEM_MC_FPU_STACK_INC_TOP() \
12149 iemFpuStackIncTop(pVCpu)
12150/** Decrement the FPU stack pointer. */
12151#define IEM_MC_FPU_STACK_DEC_TOP() \
12152 iemFpuStackDecTop(pVCpu)
12153
12154/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12155#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12156 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12157/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12158#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12159 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12160/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12161#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12162 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12163/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12164#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12165 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12166/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12167 * stack. */
12168#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12169 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12170/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12171#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12172 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12173
12174/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12175#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12176 iemFpuStackUnderflow(pVCpu, a_iStDst)
12177/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12178 * stack. */
12179#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12180 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12181/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12182 * FPUDS. */
12183#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12184 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12185/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12186 * FPUDS. Pops stack. */
12187#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12188 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12189/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12190 * stack twice. */
12191#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12192 iemFpuStackUnderflowThenPopPop(pVCpu)
12193/** Raises an FPU stack underflow exception for an instruction pushing a result
12194 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12195#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12196 iemFpuStackPushUnderflow(pVCpu)
12197/** Raises an FPU stack underflow exception for an instruction pushing a result
12198 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12199#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12200 iemFpuStackPushUnderflowTwo(pVCpu)
12201
12202/** Raises an FPU stack overflow exception as part of a push attempt. Sets
12203 * FPUIP, FPUCS and FOP. */
12204#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12205 iemFpuStackPushOverflow(pVCpu)
12206/** Raises an FPU stack overflow exception as part of a push attempt. Sets
12207 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12208#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12209 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12210/** Prepares for using the FPU state.
12211 * Ensures that we can use the host FPU in the current context (RC+R0).
12212 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12213#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12214/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12215#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12216/** Actualizes the guest FPU state so it can be accessed and modified. */
12217#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12218
12219/** Prepares for using the SSE state.
12220 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12221 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12222#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12223/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12224#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12225/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12226#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12227
12228/** Prepares for using the AVX state.
12229 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12230 * Ensures the guest AVX state in the CPUMCTX is up to date.
12231 * @note This will include the AVX512 state too when support for it is added,
12232 * due to the zero-extending behaviour of VEX instructions. */
12233#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12234/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12235#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12236/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12237#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12238
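/*
 * Rough rule of thumb (illustration, mirroring the comments above): PREPARE is
 * for blocks that are about to run an FPU/SSE/AVX worker on the host, while
 * ACTUALIZE is for blocks that only touch the saved register file through the
 * CPUMCTX, e.g. a register-to-register move:
 */
#if 0 /* example only */
IEM_MC_BEGIN(0, 0);
IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
IEM_MC_COPY_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg,
                      (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif
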
12239/**
12240 * Calls an MMX assembly implementation taking two visible arguments.
12241 *
12242 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12243 * @param a0 The first extra argument.
12244 * @param a1 The second extra argument.
12245 */
12246#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12247 do { \
12248 IEM_MC_PREPARE_FPU_USAGE(); \
12249 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12250 } while (0)
12251
12252/**
12253 * Calls an MMX assembly implementation taking three visible arguments.
12254 *
12255 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12256 * @param a0 The first extra argument.
12257 * @param a1 The second extra argument.
12258 * @param a2 The third extra argument.
12259 */
12260#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12261 do { \
12262 IEM_MC_PREPARE_FPU_USAGE(); \
12263 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12264 } while (0)
12265
12266
12267/**
12268 * Calls an SSE assembly implementation taking two visible arguments.
12269 *
12270 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12271 * @param a0 The first extra argument.
12272 * @param a1 The second extra argument.
12273 */
12274#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12275 do { \
12276 IEM_MC_PREPARE_SSE_USAGE(); \
12277 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12278 } while (0)
12279
12280/**
12281 * Calls an SSE assembly implementation taking three visible arguments.
12282 *
12283 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12284 * @param a0 The first extra argument.
12285 * @param a1 The second extra argument.
12286 * @param a2 The third extra argument.
12287 */
12288#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12289 do { \
12290 IEM_MC_PREPARE_SSE_USAGE(); \
12291 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12292 } while (0)
12293
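/*
 * Illustrative sketch (hypothetical worker): the call macros above already do
 * IEM_MC_PREPARE_SSE_USAGE themselves, so a register-form body only needs to
 * set up the XMM references before invoking them.
 */
#if 0 /* example only */
IEM_MC_BEGIN(2, 0);
IEM_MC_ARG(PRTUINT128U,  pDst, 0);
IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_hypothetical_u128, pDst, pSrc);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif
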
12294
12295/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12296 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12297#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12298 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState), 0)
12299
12300/**
12301 * Calls an AVX assembly implementation taking two visible arguments.
12302 *
12303 * There is one implicit zero'th argument, a pointer to the extended state.
12304 *
12305 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12306 * @param a1 The first extra argument.
12307 * @param a2 The second extra argument.
12308 */
12309#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12310 do { \
12311 IEM_MC_PREPARE_AVX_USAGE(); \
12312 a_pfnAImpl(pXState, (a1), (a2)); \
12313 } while (0)
12314
12315/**
12316 * Calls an AVX assembly implementation taking three visible arguments.
12317 *
12318 * There is one implicit zero'th argument, a pointer to the extended state.
12319 *
12320 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12321 * @param a1 The first extra argument.
12322 * @param a2 The second extra argument.
12323 * @param a3 The third extra argument.
12324 */
12325#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12326 do { \
12327 IEM_MC_PREPARE_AVX_USAGE(); \
12328 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12329 } while (0)
12330
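/*
 * Illustrative sketch (hypothetical worker): IEM_MC_IMPLICIT_AVX_AIMPL_ARGS
 * declares the pXState argument that the call macros pass as the implicit
 * zero'th parameter, so the visible arguments start at index 1.
 */
#if 0 /* example only */
IEM_MC_BEGIN(3, 0);
IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
IEM_MC_ARG_CONST(uint8_t, iYRegDst, /*=*/ ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, 1);
IEM_MC_ARG_CONST(uint8_t, iYRegSrc, /*=*/ (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 2);
IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_hypothetical_ymm, iYRegDst, iYRegSrc);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif
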
12331/** @note Not for IOPL or IF testing. */
12332#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
12333/** @note Not for IOPL or IF testing. */
12334#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
12335/** @note Not for IOPL or IF testing. */
12336#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
12337/** @note Not for IOPL or IF testing. */
12338#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
12339/** @note Not for IOPL or IF testing. */
12340#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12341 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12342 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12343/** @note Not for IOPL or IF testing. */
12344#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12345 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12346 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12347/** @note Not for IOPL or IF testing. */
12348#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12349 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12350 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12351 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12352/** @note Not for IOPL or IF testing. */
12353#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12354 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12355 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12356 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12357#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
12358#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
12359#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
12360/** @note Not for IOPL or IF testing. */
12361#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12362 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12363 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12364/** @note Not for IOPL or IF testing. */
12365#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12366 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12367 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12368/** @note Not for IOPL or IF testing. */
12369#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12370 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12371 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12372/** @note Not for IOPL or IF testing. */
12373#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12374 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12375 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12376/** @note Not for IOPL or IF testing. */
12377#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12378 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12379 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12380/** @note Not for IOPL or IF testing. */
12381#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12382 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12383 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12384#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12385#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12386
12387#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12388 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12389#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12390 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12391#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12392 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12393#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12394 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12395#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12396 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12397#define IEM_MC_IF_FCW_IM() \
12398 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12399
12400#define IEM_MC_ELSE() } else {
12401#define IEM_MC_ENDIF() } do {} while (0)
12402
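/*
 * The IEM_MC_IF_* macros above open a block that must be closed with
 * IEM_MC_ELSE()/IEM_MC_ENDIF().  Minimal sketch of a conditional relative
 * jump (i8Imm being a previously fetched immediate; illustration only):
 */
#if 0 /* example only */
IEM_MC_BEGIN(0, 0);
IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
    IEM_MC_REL_JMP_S8(i8Imm);
IEM_MC_ELSE()
    IEM_MC_ADVANCE_RIP();
IEM_MC_ENDIF();
IEM_MC_END();
#endif
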
12403/** @} */
12404
12405
12406/** @name Opcode Debug Helpers.
12407 * @{
12408 */
12409#ifdef VBOX_WITH_STATISTICS
12410# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12411#else
12412# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12413#endif
12414
12415#ifdef DEBUG
12416# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12417 do { \
12418 IEMOP_INC_STATS(a_Stats); \
12419 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
12420 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12421 } while (0)
12422
12423# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12424 do { \
12425 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12426 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12427 (void)RT_CONCAT(OP_,a_Upper); \
12428 (void)(a_fDisHints); \
12429 (void)(a_fIemHints); \
12430 } while (0)
12431
12432# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12433 do { \
12434 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12435 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12436 (void)RT_CONCAT(OP_,a_Upper); \
12437 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12438 (void)(a_fDisHints); \
12439 (void)(a_fIemHints); \
12440 } while (0)
12441
12442# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12443 do { \
12444 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12445 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12446 (void)RT_CONCAT(OP_,a_Upper); \
12447 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12448 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12449 (void)(a_fDisHints); \
12450 (void)(a_fIemHints); \
12451 } while (0)
12452
12453# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12454 do { \
12455 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12456 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12457 (void)RT_CONCAT(OP_,a_Upper); \
12458 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12459 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12460 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12461 (void)(a_fDisHints); \
12462 (void)(a_fIemHints); \
12463 } while (0)
12464
12465# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12466 do { \
12467 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12468 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12469 (void)RT_CONCAT(OP_,a_Upper); \
12470 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12471 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12472 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12473 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12474 (void)(a_fDisHints); \
12475 (void)(a_fIemHints); \
12476 } while (0)
12477
12478#else
12479# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12480
12481# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12482 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12483# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12484 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12485# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12486 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12487# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12488 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12489# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12490 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12491
12492#endif
12493
12494#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12495 IEMOP_MNEMONIC0EX(a_Lower, \
12496 #a_Lower, \
12497 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12498#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12499 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12500 #a_Lower " " #a_Op1, \
12501 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12502#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12503 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12504 #a_Lower " " #a_Op1 "," #a_Op2, \
12505 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12506#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12507 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12508 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12509 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12510#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12511 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12512 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12513 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
12514
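/*
 * Illustrative sketch: the wrappers above derive the statistics member and the
 * Log4 mnemonic string from the lower-case name and operand forms.  Used
 * roughly like this in an opcode encoder (mirrors the add Eb,Gb style of use):
 */
#if 0 /* example only */
IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, 0);
/* ... bumps the 'add_Eb_Gb' statistics member and logs "add Eb,Gb" at level 4. */
#endif
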
12515/** @} */
12516
12517
12518/** @name Opcode Helpers.
12519 * @{
12520 */
12521
12522#ifdef IN_RING3
12523# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12524 do { \
12525 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12526 else \
12527 { \
12528 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12529 return IEMOP_RAISE_INVALID_OPCODE(); \
12530 } \
12531 } while (0)
12532#else
12533# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12534 do { \
12535 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12536 else return IEMOP_RAISE_INVALID_OPCODE(); \
12537 } while (0)
12538#endif
12539
12540/** The instruction requires a 186 or later. */
12541#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12542# define IEMOP_HLP_MIN_186() do { } while (0)
12543#else
12544# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12545#endif
12546
12547/** The instruction requires a 286 or later. */
12548#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12549# define IEMOP_HLP_MIN_286() do { } while (0)
12550#else
12551# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12552#endif
12553
12554/** The instruction requires a 386 or later. */
12555#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12556# define IEMOP_HLP_MIN_386() do { } while (0)
12557#else
12558# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12559#endif
12560
12561/** The instruction requires a 386 or later if the given expression is true. */
12562#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12563# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12564#else
12565# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12566#endif
12567
12568/** The instruction requires a 486 or later. */
12569#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12570# define IEMOP_HLP_MIN_486() do { } while (0)
12571#else
12572# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12573#endif
12574
12575/** The instruction requires a Pentium (586) or later. */
12576#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12577# define IEMOP_HLP_MIN_586() do { } while (0)
12578#else
12579# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12580#endif
12581
12582/** The instruction requires a PentiumPro (686) or later. */
12583#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12584# define IEMOP_HLP_MIN_686() do { } while (0)
12585#else
12586# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12587#endif
12588
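/*
 * Illustrative sketch (hypothetical encoder): the minimum-CPU checks sit at
 * the top of an opcode function and raise \#UD when the configured target CPU
 * is older than required.
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_hypothetical_186_insn)
{
    IEMOP_HLP_MIN_186(); /* #UD on pre-80186 targets */
    /* ... normal decoding continues here ... */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hypothetical);
}
#endif
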
12589
12590/** The instruction raises an \#UD in real and V8086 mode. */
12591#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12592 do \
12593 { \
12594 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12595 else return IEMOP_RAISE_INVALID_OPCODE(); \
12596 } while (0)
12597
12598/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12599 * 64-bit mode. */
12600#define IEMOP_HLP_NO_64BIT() \
12601 do \
12602 { \
12603 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12604 return IEMOP_RAISE_INVALID_OPCODE(); \
12605 } while (0)
12606
12607/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12608 * 64-bit mode. */
12609#define IEMOP_HLP_ONLY_64BIT() \
12610 do \
12611 { \
12612 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12613 return IEMOP_RAISE_INVALID_OPCODE(); \
12614 } while (0)
12615
12616/** The instruction defaults to 64-bit operand size in 64-bit mode. */
12617#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12618 do \
12619 { \
12620 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12621 iemRecalEffOpSize64Default(pVCpu); \
12622 } while (0)
12623
12624/** The instruction has 64-bit operand size in 64-bit mode. */
12625#define IEMOP_HLP_64BIT_OP_SIZE() \
12626 do \
12627 { \
12628 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12629 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12630 } while (0)
12631
12632/** Only a REX prefix immediately preceding the first opcode byte takes
12633 * effect. This macro helps ensure this as well as logging bad guest code. */
12634#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12635 do \
12636 { \
12637 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12638 { \
12639 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12640 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12641 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12642 pVCpu->iem.s.uRexB = 0; \
12643 pVCpu->iem.s.uRexIndex = 0; \
12644 pVCpu->iem.s.uRexReg = 0; \
12645 iemRecalEffOpSize(pVCpu); \
12646 } \
12647 } while (0)
12648
12649/**
12650 * Done decoding.
12651 */
12652#define IEMOP_HLP_DONE_DECODING() \
12653 do \
12654 { \
12655 /*nothing for now, maybe later... */ \
12656 } while (0)
12657
12658/**
12659 * Done decoding, raise \#UD exception if lock prefix present.
12660 */
12661#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12662 do \
12663 { \
12664 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12665 { /* likely */ } \
12666 else \
12667 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12668 } while (0)
12669
12670
12671/**
12672 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12673 * repnz or size prefixes are present, or if in real or v8086 mode.
12674 */
12675#define IEMOP_HLP_DONE_VEX_DECODING() \
12676 do \
12677 { \
12678 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12679 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12680 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12681 { /* likely */ } \
12682 else \
12683 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12684 } while (0)
12685
12686/**
12687 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12688 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12689 */
12690#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12691 do \
12692 { \
12693 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12694 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12695 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12696 && pVCpu->iem.s.uVexLength == 0)) \
12697 { /* likely */ } \
12698 else \
12699 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12700 } while (0)
12701
12702
12703/**
12704 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12705 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12706 * register 0, or if in real or v8086 mode.
12707 */
12708#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12709 do \
12710 { \
12711 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12712 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12713 && !pVCpu->iem.s.uVex3rdReg \
12714 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12715 { /* likely */ } \
12716 else \
12717 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12718 } while (0)
12719
12720/**
12721 * Done decoding VEX, no V, L=0.
12722 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12723 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12724 */
12725#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12726 do \
12727 { \
12728 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12729 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12730 && pVCpu->iem.s.uVexLength == 0 \
12731 && pVCpu->iem.s.uVex3rdReg == 0 \
12732 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12733 { /* likely */ } \
12734 else \
12735 return IEMOP_RAISE_INVALID_OPCODE(); \
12736 } while (0)
12737
12738#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12739 do \
12740 { \
12741 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12742 { /* likely */ } \
12743 else \
12744 { \
12745 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12746 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12747 } \
12748 } while (0)
12749#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12750 do \
12751 { \
12752 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12753 { /* likely */ } \
12754 else \
12755 { \
12756 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12757 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12758 } \
12759 } while (0)
12760
12761/**
12762 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12763 * are present.
12764 */
12765#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12766 do \
12767 { \
12768 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12769 { /* likely */ } \
12770 else \
12771 return IEMOP_RAISE_INVALID_OPCODE(); \
12772 } while (0)
12773
12774
12775#ifdef VBOX_WITH_NESTED_HWVIRT
12776/** Checks and handles SVM nested-guest control & instruction intercepts. */
12777# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12778 do \
12779 { \
12780 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12781 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12782 } while (0)
12783
12784/** Checks and handles SVM nested-guest CR read intercepts. */
12785# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12786 do \
12787 { \
12788 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12789 IEM_RETURN_SVM_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12790 } while (0)
12791
12792#else /* !VBOX_WITH_NESTED_HWVIRT */
12793# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12794# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12795#endif /* !VBOX_WITH_NESTED_HWVIRT */
12796
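/*
 * Illustrative placement sketch: the exit code constants come from the SVM
 * headers; whether a given instruction performs this check in its encoder or
 * in its C implementation varies, so this is illustration only.
 */
#if 0 /* example only */
IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_CPUID, SVM_EXIT_CPUID, 0 /*uExitInfo1*/, 0 /*uExitInfo2*/);
#endif
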
12797
12798/**
12799 * Calculates the effective address of a ModR/M memory operand.
12800 *
12801 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12802 *
12803 * @return Strict VBox status code.
12804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12805 * @param bRm The ModRM byte.
12806 * @param cbImm The size of any immediate following the
12807 * effective address opcode bytes. Important for
12808 * RIP relative addressing.
12809 * @param pGCPtrEff Where to return the effective address.
12810 */
12811IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12812{
12813 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12814 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12815# define SET_SS_DEF() \
12816 do \
12817 { \
12818 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12819 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12820 } while (0)
12821
12822 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12823 {
12824/** @todo Check the effective address size crap! */
12825 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12826 {
12827 uint16_t u16EffAddr;
12828
12829 /* Handle the disp16 form with no registers first. */
12830 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12831 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12832 else
12833 {
12834                /* Get the displacement. */
12835 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12836 {
12837 case 0: u16EffAddr = 0; break;
12838 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12839 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12840 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12841 }
12842
12843 /* Add the base and index registers to the disp. */
12844 switch (bRm & X86_MODRM_RM_MASK)
12845 {
12846 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12847 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12848 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12849 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12850 case 4: u16EffAddr += pCtx->si; break;
12851 case 5: u16EffAddr += pCtx->di; break;
12852 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12853 case 7: u16EffAddr += pCtx->bx; break;
12854 }
12855 }
12856
12857 *pGCPtrEff = u16EffAddr;
12858 }
12859 else
12860 {
12861 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12862 uint32_t u32EffAddr;
12863
12864 /* Handle the disp32 form with no registers first. */
12865 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12866 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12867 else
12868 {
12869 /* Get the register (or SIB) value. */
12870 switch ((bRm & X86_MODRM_RM_MASK))
12871 {
12872 case 0: u32EffAddr = pCtx->eax; break;
12873 case 1: u32EffAddr = pCtx->ecx; break;
12874 case 2: u32EffAddr = pCtx->edx; break;
12875 case 3: u32EffAddr = pCtx->ebx; break;
12876 case 4: /* SIB */
12877 {
12878 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12879
12880 /* Get the index and scale it. */
12881 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12882 {
12883 case 0: u32EffAddr = pCtx->eax; break;
12884 case 1: u32EffAddr = pCtx->ecx; break;
12885 case 2: u32EffAddr = pCtx->edx; break;
12886 case 3: u32EffAddr = pCtx->ebx; break;
12887 case 4: u32EffAddr = 0; /*none */ break;
12888 case 5: u32EffAddr = pCtx->ebp; break;
12889 case 6: u32EffAddr = pCtx->esi; break;
12890 case 7: u32EffAddr = pCtx->edi; break;
12891 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12892 }
12893 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12894
12895 /* add base */
12896 switch (bSib & X86_SIB_BASE_MASK)
12897 {
12898 case 0: u32EffAddr += pCtx->eax; break;
12899 case 1: u32EffAddr += pCtx->ecx; break;
12900 case 2: u32EffAddr += pCtx->edx; break;
12901 case 3: u32EffAddr += pCtx->ebx; break;
12902 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12903 case 5:
12904 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12905 {
12906 u32EffAddr += pCtx->ebp;
12907 SET_SS_DEF();
12908 }
12909 else
12910 {
12911 uint32_t u32Disp;
12912 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12913 u32EffAddr += u32Disp;
12914 }
12915 break;
12916 case 6: u32EffAddr += pCtx->esi; break;
12917 case 7: u32EffAddr += pCtx->edi; break;
12918 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12919 }
12920 break;
12921 }
12922 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12923 case 6: u32EffAddr = pCtx->esi; break;
12924 case 7: u32EffAddr = pCtx->edi; break;
12925 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12926 }
12927
12928 /* Get and add the displacement. */
12929 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12930 {
12931 case 0:
12932 break;
12933 case 1:
12934 {
12935 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12936 u32EffAddr += i8Disp;
12937 break;
12938 }
12939 case 2:
12940 {
12941 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12942 u32EffAddr += u32Disp;
12943 break;
12944 }
12945 default:
12946 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12947 }
12948
12949 }
12950 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12951 *pGCPtrEff = u32EffAddr;
12952 else
12953 {
12954 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12955 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12956 }
12957 }
12958 }
12959 else
12960 {
12961 uint64_t u64EffAddr;
12962
12963 /* Handle the rip+disp32 form with no registers first. */
12964 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12965 {
12966 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12967 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12968 }
12969 else
12970 {
12971 /* Get the register (or SIB) value. */
12972 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12973 {
12974 case 0: u64EffAddr = pCtx->rax; break;
12975 case 1: u64EffAddr = pCtx->rcx; break;
12976 case 2: u64EffAddr = pCtx->rdx; break;
12977 case 3: u64EffAddr = pCtx->rbx; break;
12978 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12979 case 6: u64EffAddr = pCtx->rsi; break;
12980 case 7: u64EffAddr = pCtx->rdi; break;
12981 case 8: u64EffAddr = pCtx->r8; break;
12982 case 9: u64EffAddr = pCtx->r9; break;
12983 case 10: u64EffAddr = pCtx->r10; break;
12984 case 11: u64EffAddr = pCtx->r11; break;
12985 case 13: u64EffAddr = pCtx->r13; break;
12986 case 14: u64EffAddr = pCtx->r14; break;
12987 case 15: u64EffAddr = pCtx->r15; break;
12988 /* SIB */
12989 case 4:
12990 case 12:
12991 {
12992 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12993
12994 /* Get the index and scale it. */
12995 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12996 {
12997 case 0: u64EffAddr = pCtx->rax; break;
12998 case 1: u64EffAddr = pCtx->rcx; break;
12999 case 2: u64EffAddr = pCtx->rdx; break;
13000 case 3: u64EffAddr = pCtx->rbx; break;
13001 case 4: u64EffAddr = 0; /*none */ break;
13002 case 5: u64EffAddr = pCtx->rbp; break;
13003 case 6: u64EffAddr = pCtx->rsi; break;
13004 case 7: u64EffAddr = pCtx->rdi; break;
13005 case 8: u64EffAddr = pCtx->r8; break;
13006 case 9: u64EffAddr = pCtx->r9; break;
13007 case 10: u64EffAddr = pCtx->r10; break;
13008 case 11: u64EffAddr = pCtx->r11; break;
13009 case 12: u64EffAddr = pCtx->r12; break;
13010 case 13: u64EffAddr = pCtx->r13; break;
13011 case 14: u64EffAddr = pCtx->r14; break;
13012 case 15: u64EffAddr = pCtx->r15; break;
13013 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13014 }
13015 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13016
13017 /* add base */
13018 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13019 {
13020 case 0: u64EffAddr += pCtx->rax; break;
13021 case 1: u64EffAddr += pCtx->rcx; break;
13022 case 2: u64EffAddr += pCtx->rdx; break;
13023 case 3: u64EffAddr += pCtx->rbx; break;
13024 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13025 case 6: u64EffAddr += pCtx->rsi; break;
13026 case 7: u64EffAddr += pCtx->rdi; break;
13027 case 8: u64EffAddr += pCtx->r8; break;
13028 case 9: u64EffAddr += pCtx->r9; break;
13029 case 10: u64EffAddr += pCtx->r10; break;
13030 case 11: u64EffAddr += pCtx->r11; break;
13031 case 12: u64EffAddr += pCtx->r12; break;
13032 case 14: u64EffAddr += pCtx->r14; break;
13033 case 15: u64EffAddr += pCtx->r15; break;
13034 /* complicated encodings */
13035 case 5:
13036 case 13:
13037 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13038 {
13039 if (!pVCpu->iem.s.uRexB)
13040 {
13041 u64EffAddr += pCtx->rbp;
13042 SET_SS_DEF();
13043 }
13044 else
13045 u64EffAddr += pCtx->r13;
13046 }
13047 else
13048 {
13049 uint32_t u32Disp;
13050 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13051 u64EffAddr += (int32_t)u32Disp;
13052 }
13053 break;
13054 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13055 }
13056 break;
13057 }
13058 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13059 }
13060
13061 /* Get and add the displacement. */
13062 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13063 {
13064 case 0:
13065 break;
13066 case 1:
13067 {
13068 int8_t i8Disp;
13069 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13070 u64EffAddr += i8Disp;
13071 break;
13072 }
13073 case 2:
13074 {
13075 uint32_t u32Disp;
13076 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13077 u64EffAddr += (int32_t)u32Disp;
13078 break;
13079 }
13080 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13081 }
13082
13083 }
13084
13085 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13086 *pGCPtrEff = u64EffAddr;
13087 else
13088 {
13089 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13090 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13091 }
13092 }
13093
13094 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13095 return VINF_SUCCESS;
13096}
13097
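/*
 * Worked example for the function above: bRm=0x46 in 16-bit addressing has
 * mod=1, reg=0, rm=6, so a disp8 is fetched, BP is added and SET_SS_DEF()
 * makes SS the default segment, giving BP+disp8.  In 64-bit mode bRm=0x05
 * (mod=0, rm=5) takes the rip+disp32 branch instead, where the instruction
 * length plus cbImm is added so the displacement ends up relative to the
 * next instruction.
 */
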
13098
13099/**
13100 * Calculates the effective address of a ModR/M memory operand.
13101 *
13102 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13103 *
13104 * @return Strict VBox status code.
13105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13106 * @param bRm The ModRM byte.
13107 * @param cbImm The size of any immediate following the
13108 * effective address opcode bytes. Important for
13109 * RIP relative addressing.
13110 * @param pGCPtrEff Where to return the effective address.
13111 * @param offRsp RSP displacement.
13112 */
13113IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13114{
13115    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13116 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13117# define SET_SS_DEF() \
13118 do \
13119 { \
13120 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13121 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13122 } while (0)
13123
13124 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13125 {
13126/** @todo Check the effective address size crap! */
13127 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13128 {
13129 uint16_t u16EffAddr;
13130
13131 /* Handle the disp16 form with no registers first. */
13132 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13133 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13134 else
13135 {
13136                /* Get the displacement. */
13137 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13138 {
13139 case 0: u16EffAddr = 0; break;
13140 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13141 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13142 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13143 }
13144
13145 /* Add the base and index registers to the disp. */
13146 switch (bRm & X86_MODRM_RM_MASK)
13147 {
13148 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13149 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13150 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13151 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13152 case 4: u16EffAddr += pCtx->si; break;
13153 case 5: u16EffAddr += pCtx->di; break;
13154 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13155 case 7: u16EffAddr += pCtx->bx; break;
13156 }
13157 }
13158
13159 *pGCPtrEff = u16EffAddr;
13160 }
13161 else
13162 {
13163 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13164 uint32_t u32EffAddr;
13165
13166 /* Handle the disp32 form with no registers first. */
13167 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13168 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13169 else
13170 {
13171 /* Get the register (or SIB) value. */
13172 switch ((bRm & X86_MODRM_RM_MASK))
13173 {
13174 case 0: u32EffAddr = pCtx->eax; break;
13175 case 1: u32EffAddr = pCtx->ecx; break;
13176 case 2: u32EffAddr = pCtx->edx; break;
13177 case 3: u32EffAddr = pCtx->ebx; break;
13178 case 4: /* SIB */
13179 {
13180 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13181
13182 /* Get the index and scale it. */
13183 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13184 {
13185 case 0: u32EffAddr = pCtx->eax; break;
13186 case 1: u32EffAddr = pCtx->ecx; break;
13187 case 2: u32EffAddr = pCtx->edx; break;
13188 case 3: u32EffAddr = pCtx->ebx; break;
13189 case 4: u32EffAddr = 0; /*none */ break;
13190 case 5: u32EffAddr = pCtx->ebp; break;
13191 case 6: u32EffAddr = pCtx->esi; break;
13192 case 7: u32EffAddr = pCtx->edi; break;
13193 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13194 }
13195 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13196
13197 /* add base */
13198 switch (bSib & X86_SIB_BASE_MASK)
13199 {
13200 case 0: u32EffAddr += pCtx->eax; break;
13201 case 1: u32EffAddr += pCtx->ecx; break;
13202 case 2: u32EffAddr += pCtx->edx; break;
13203 case 3: u32EffAddr += pCtx->ebx; break;
13204 case 4:
13205 u32EffAddr += pCtx->esp + offRsp;
13206 SET_SS_DEF();
13207 break;
13208 case 5:
13209 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13210 {
13211 u32EffAddr += pCtx->ebp;
13212 SET_SS_DEF();
13213 }
13214 else
13215 {
13216 uint32_t u32Disp;
13217 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13218 u32EffAddr += u32Disp;
13219 }
13220 break;
13221 case 6: u32EffAddr += pCtx->esi; break;
13222 case 7: u32EffAddr += pCtx->edi; break;
13223 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13224 }
13225 break;
13226 }
13227 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13228 case 6: u32EffAddr = pCtx->esi; break;
13229 case 7: u32EffAddr = pCtx->edi; break;
13230 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13231 }
13232
13233 /* Get and add the displacement. */
13234 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13235 {
13236 case 0:
13237 break;
13238 case 1:
13239 {
13240 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13241 u32EffAddr += i8Disp;
13242 break;
13243 }
13244 case 2:
13245 {
13246 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13247 u32EffAddr += u32Disp;
13248 break;
13249 }
13250 default:
13251 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13252 }
13253
13254 }
13255 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13256 *pGCPtrEff = u32EffAddr;
13257 else
13258 {
13259 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13260 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13261 }
13262 }
13263 }
13264 else
13265 {
13266 uint64_t u64EffAddr;
13267
13268 /* Handle the rip+disp32 form with no registers first. */
13269 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13270 {
13271 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13272 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13273 }
13274 else
13275 {
13276 /* Get the register (or SIB) value. */
13277 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13278 {
13279 case 0: u64EffAddr = pCtx->rax; break;
13280 case 1: u64EffAddr = pCtx->rcx; break;
13281 case 2: u64EffAddr = pCtx->rdx; break;
13282 case 3: u64EffAddr = pCtx->rbx; break;
13283 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13284 case 6: u64EffAddr = pCtx->rsi; break;
13285 case 7: u64EffAddr = pCtx->rdi; break;
13286 case 8: u64EffAddr = pCtx->r8; break;
13287 case 9: u64EffAddr = pCtx->r9; break;
13288 case 10: u64EffAddr = pCtx->r10; break;
13289 case 11: u64EffAddr = pCtx->r11; break;
13290 case 13: u64EffAddr = pCtx->r13; break;
13291 case 14: u64EffAddr = pCtx->r14; break;
13292 case 15: u64EffAddr = pCtx->r15; break;
13293 /* SIB */
13294 case 4:
13295 case 12:
13296 {
13297 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13298
13299 /* Get the index and scale it. */
13300 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13301 {
13302 case 0: u64EffAddr = pCtx->rax; break;
13303 case 1: u64EffAddr = pCtx->rcx; break;
13304 case 2: u64EffAddr = pCtx->rdx; break;
13305 case 3: u64EffAddr = pCtx->rbx; break;
13306 case 4: u64EffAddr = 0; /*none */ break;
13307 case 5: u64EffAddr = pCtx->rbp; break;
13308 case 6: u64EffAddr = pCtx->rsi; break;
13309 case 7: u64EffAddr = pCtx->rdi; break;
13310 case 8: u64EffAddr = pCtx->r8; break;
13311 case 9: u64EffAddr = pCtx->r9; break;
13312 case 10: u64EffAddr = pCtx->r10; break;
13313 case 11: u64EffAddr = pCtx->r11; break;
13314 case 12: u64EffAddr = pCtx->r12; break;
13315 case 13: u64EffAddr = pCtx->r13; break;
13316 case 14: u64EffAddr = pCtx->r14; break;
13317 case 15: u64EffAddr = pCtx->r15; break;
13318 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13319 }
13320 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13321
13322 /* add base */
13323 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13324 {
13325 case 0: u64EffAddr += pCtx->rax; break;
13326 case 1: u64EffAddr += pCtx->rcx; break;
13327 case 2: u64EffAddr += pCtx->rdx; break;
13328 case 3: u64EffAddr += pCtx->rbx; break;
13329 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
13330 case 6: u64EffAddr += pCtx->rsi; break;
13331 case 7: u64EffAddr += pCtx->rdi; break;
13332 case 8: u64EffAddr += pCtx->r8; break;
13333 case 9: u64EffAddr += pCtx->r9; break;
13334 case 10: u64EffAddr += pCtx->r10; break;
13335 case 11: u64EffAddr += pCtx->r11; break;
13336 case 12: u64EffAddr += pCtx->r12; break;
13337 case 14: u64EffAddr += pCtx->r14; break;
13338 case 15: u64EffAddr += pCtx->r15; break;
13339 /* complicated encodings */
13340 case 5:
13341 case 13:
13342 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13343 {
13344 if (!pVCpu->iem.s.uRexB)
13345 {
13346 u64EffAddr += pCtx->rbp;
13347 SET_SS_DEF();
13348 }
13349 else
13350 u64EffAddr += pCtx->r13;
13351 }
13352 else
13353 {
13354 uint32_t u32Disp;
13355 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13356 u64EffAddr += (int32_t)u32Disp;
13357 }
13358 break;
13359 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13360 }
13361 break;
13362 }
13363 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13364 }
13365
13366 /* Get and add the displacement. */
13367 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13368 {
13369 case 0:
13370 break;
13371 case 1:
13372 {
13373 int8_t i8Disp;
13374 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13375 u64EffAddr += i8Disp;
13376 break;
13377 }
13378 case 2:
13379 {
13380 uint32_t u32Disp;
13381 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13382 u64EffAddr += (int32_t)u32Disp;
13383 break;
13384 }
13385 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13386 }
13387
13388 }
13389
13390 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13391 *pGCPtrEff = u64EffAddr;
13392 else
13393 {
13394 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13395 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13396 }
13397 }
13398
13399    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13400 return VINF_SUCCESS;
13401}
13402
13403
13404#ifdef IEM_WITH_SETJMP
13405/**
13406 * Calculates the effective address of a ModR/M memory operand.
13407 *
13408 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13409 *
13410 * May longjmp on internal error.
13411 *
13412 * @return The effective address.
13413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13414 * @param bRm The ModRM byte.
13415 * @param cbImm The size of any immediate following the
13416 * effective address opcode bytes. Important for
13417 * RIP relative addressing.
13418 */
13419IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13420{
13421 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13422 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13423# define SET_SS_DEF() \
13424 do \
13425 { \
13426 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13427 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13428 } while (0)
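    /* Note: on x86, BP/SP based effective addresses default to the SS segment,
       which is why the helpers switch the effective segment to SS here unless
       an explicit segment prefix was supplied. */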
13429
13430 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13431 {
13432/** @todo Check the effective address size crap! */
13433 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13434 {
13435 uint16_t u16EffAddr;
13436
13437 /* Handle the disp16 form with no registers first. */
13438 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13439 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13440 else
13441 {
13442 /* Get the displacement. */
13443 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13444 {
13445 case 0: u16EffAddr = 0; break;
13446 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13447 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13448 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13449 }
13450
13451 /* Add the base and index registers to the disp. */
13452 switch (bRm & X86_MODRM_RM_MASK)
13453 {
13454 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13455 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13456 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13457 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13458 case 4: u16EffAddr += pCtx->si; break;
13459 case 5: u16EffAddr += pCtx->di; break;
13460 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13461 case 7: u16EffAddr += pCtx->bx; break;
13462 }
13463 }
13464
13465 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13466 return u16EffAddr;
13467 }
13468
13469 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13470 uint32_t u32EffAddr;
13471
13472 /* Handle the disp32 form with no registers first. */
13473 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13474 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13475 else
13476 {
13477 /* Get the register (or SIB) value. */
13478 switch ((bRm & X86_MODRM_RM_MASK))
13479 {
13480 case 0: u32EffAddr = pCtx->eax; break;
13481 case 1: u32EffAddr = pCtx->ecx; break;
13482 case 2: u32EffAddr = pCtx->edx; break;
13483 case 3: u32EffAddr = pCtx->ebx; break;
13484 case 4: /* SIB */
13485 {
13486 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13487
13488 /* Get the index and scale it. */
13489 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13490 {
13491 case 0: u32EffAddr = pCtx->eax; break;
13492 case 1: u32EffAddr = pCtx->ecx; break;
13493 case 2: u32EffAddr = pCtx->edx; break;
13494 case 3: u32EffAddr = pCtx->ebx; break;
13495 case 4: u32EffAddr = 0; /*none */ break;
13496 case 5: u32EffAddr = pCtx->ebp; break;
13497 case 6: u32EffAddr = pCtx->esi; break;
13498 case 7: u32EffAddr = pCtx->edi; break;
13499 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13500 }
13501 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13502
13503 /* add base */
13504 switch (bSib & X86_SIB_BASE_MASK)
13505 {
13506 case 0: u32EffAddr += pCtx->eax; break;
13507 case 1: u32EffAddr += pCtx->ecx; break;
13508 case 2: u32EffAddr += pCtx->edx; break;
13509 case 3: u32EffAddr += pCtx->ebx; break;
13510 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13511 case 5:
13512 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13513 {
13514 u32EffAddr += pCtx->ebp;
13515 SET_SS_DEF();
13516 }
13517 else
13518 {
13519 uint32_t u32Disp;
13520 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13521 u32EffAddr += u32Disp;
13522 }
13523 break;
13524 case 6: u32EffAddr += pCtx->esi; break;
13525 case 7: u32EffAddr += pCtx->edi; break;
13526 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13527 }
13528 break;
13529 }
13530 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13531 case 6: u32EffAddr = pCtx->esi; break;
13532 case 7: u32EffAddr = pCtx->edi; break;
13533 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13534 }
13535
13536 /* Get and add the displacement. */
13537 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13538 {
13539 case 0:
13540 break;
13541 case 1:
13542 {
13543 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13544 u32EffAddr += i8Disp;
13545 break;
13546 }
13547 case 2:
13548 {
13549 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13550 u32EffAddr += u32Disp;
13551 break;
13552 }
13553 default:
13554 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13555 }
13556 }
13557
13558 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13559 {
13560 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13561 return u32EffAddr;
13562 }
13563 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13564 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13565 return u32EffAddr & UINT16_MAX;
13566 }
13567
13568 uint64_t u64EffAddr;
13569
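    /* Note: RIP relative addressing is relative to the address of the *next*
       instruction.  IEM_GET_INSTR_LEN() only covers the bytes decoded so far,
       so cbImm must be added to land on the end of the instruction. */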
13570 /* Handle the rip+disp32 form with no registers first. */
13571 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13572 {
13573 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13574 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13575 }
13576 else
13577 {
13578 /* Get the register (or SIB) value. */
13579 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13580 {
13581 case 0: u64EffAddr = pCtx->rax; break;
13582 case 1: u64EffAddr = pCtx->rcx; break;
13583 case 2: u64EffAddr = pCtx->rdx; break;
13584 case 3: u64EffAddr = pCtx->rbx; break;
13585 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13586 case 6: u64EffAddr = pCtx->rsi; break;
13587 case 7: u64EffAddr = pCtx->rdi; break;
13588 case 8: u64EffAddr = pCtx->r8; break;
13589 case 9: u64EffAddr = pCtx->r9; break;
13590 case 10: u64EffAddr = pCtx->r10; break;
13591 case 11: u64EffAddr = pCtx->r11; break;
13592 case 13: u64EffAddr = pCtx->r13; break;
13593 case 14: u64EffAddr = pCtx->r14; break;
13594 case 15: u64EffAddr = pCtx->r15; break;
13595 /* SIB */
13596 case 4:
13597 case 12:
13598 {
13599 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13600
13601 /* Get the index and scale it. */
13602 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13603 {
13604 case 0: u64EffAddr = pCtx->rax; break;
13605 case 1: u64EffAddr = pCtx->rcx; break;
13606 case 2: u64EffAddr = pCtx->rdx; break;
13607 case 3: u64EffAddr = pCtx->rbx; break;
13608 case 4: u64EffAddr = 0; /*none */ break;
13609 case 5: u64EffAddr = pCtx->rbp; break;
13610 case 6: u64EffAddr = pCtx->rsi; break;
13611 case 7: u64EffAddr = pCtx->rdi; break;
13612 case 8: u64EffAddr = pCtx->r8; break;
13613 case 9: u64EffAddr = pCtx->r9; break;
13614 case 10: u64EffAddr = pCtx->r10; break;
13615 case 11: u64EffAddr = pCtx->r11; break;
13616 case 12: u64EffAddr = pCtx->r12; break;
13617 case 13: u64EffAddr = pCtx->r13; break;
13618 case 14: u64EffAddr = pCtx->r14; break;
13619 case 15: u64EffAddr = pCtx->r15; break;
13620 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13621 }
13622 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13623
13624 /* add base */
13625 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13626 {
13627 case 0: u64EffAddr += pCtx->rax; break;
13628 case 1: u64EffAddr += pCtx->rcx; break;
13629 case 2: u64EffAddr += pCtx->rdx; break;
13630 case 3: u64EffAddr += pCtx->rbx; break;
13631 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13632 case 6: u64EffAddr += pCtx->rsi; break;
13633 case 7: u64EffAddr += pCtx->rdi; break;
13634 case 8: u64EffAddr += pCtx->r8; break;
13635 case 9: u64EffAddr += pCtx->r9; break;
13636 case 10: u64EffAddr += pCtx->r10; break;
13637 case 11: u64EffAddr += pCtx->r11; break;
13638 case 12: u64EffAddr += pCtx->r12; break;
13639 case 14: u64EffAddr += pCtx->r14; break;
13640 case 15: u64EffAddr += pCtx->r15; break;
13641 /* complicated encodings */
13642 case 5:
13643 case 13:
13644 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13645 {
13646 if (!pVCpu->iem.s.uRexB)
13647 {
13648 u64EffAddr += pCtx->rbp;
13649 SET_SS_DEF();
13650 }
13651 else
13652 u64EffAddr += pCtx->r13;
13653 }
13654 else
13655 {
13656 uint32_t u32Disp;
13657 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13658 u64EffAddr += (int32_t)u32Disp;
13659 }
13660 break;
13661 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13662 }
13663 break;
13664 }
13665 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13666 }
13667
13668 /* Get and add the displacement. */
13669 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13670 {
13671 case 0:
13672 break;
13673 case 1:
13674 {
13675 int8_t i8Disp;
13676 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13677 u64EffAddr += i8Disp;
13678 break;
13679 }
13680 case 2:
13681 {
13682 uint32_t u32Disp;
13683 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13684 u64EffAddr += (int32_t)u32Disp;
13685 break;
13686 }
13687 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13688 }
13689
13690 }
13691
13692 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13693 {
13694 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13695 return u64EffAddr;
13696 }
13697 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13698 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13699 return u64EffAddr & UINT32_MAX;
13700}
13701#endif /* IEM_WITH_SETJMP */
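/*
 * Worked example (illustrative only) of what the helpers above compute when
 * decoding the byte sequence "8b 44 9e 10" in 32-bit mode:
 *      8b  - mov Gv,Ev
 *      44  - ModRM: mod=01 (disp8 follows the SIB), reg=000 (eax), rm=100 (SIB)
 *      9e  - SIB:   scale=10 (*4), index=011 (ebx), base=110 (esi)
 *      10  - disp8
 * The effective address works out to esi + ebx*4 + 0x10, i.e. the instruction
 * is "mov eax, [esi+ebx*4+0x10]".
 */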
13702
13703
13704/** @} */
13705
13706
13707
13708/*
13709 * Include the instructions
13710 */
13711#include "IEMAllInstructions.cpp.h"
13712
13713
13714
13715
13716#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13717
13718/**
13719 * Sets up execution verification mode.
13720 */
13721IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13722{
13723
13724 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13725
13726 /*
13727 * Always note down the address of the current instruction.
13728 */
13729 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13730 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13731
13732 /*
13733 * Enable verification and/or logging.
13734 */
13735 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
13736 if ( fNewNoRem
13737 && ( 0
13738#if 0 /* auto enable on first paged protected mode interrupt */
13739 || ( pOrgCtx->eflags.Bits.u1IF
13740 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13741 && TRPMHasTrap(pVCpu)
13742 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13743#endif
13744#if 0
13745 || ( pOrgCtx->cs.Sel == 0x10
13746 && ( pOrgCtx->rip == 0x90119e3e
13747 || pOrgCtx->rip == 0x901d9810))
13748#endif
13749#if 0 /* Auto enable DSL - FPU stuff. */
13750 || ( pOrgCtx->cs == 0x10
13751 && (// pOrgCtx->rip == 0xc02ec07f
13752 //|| pOrgCtx->rip == 0xc02ec082
13753 //|| pOrgCtx->rip == 0xc02ec0c9
13754 0
13755 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13756#endif
13757#if 0 /* Auto enable DSL - fstp st0 stuff. */
13758 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13759#endif
13760#if 0
13761 || pOrgCtx->rip == 0x9022bb3a
13762#endif
13763#if 0
13764 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13765#endif
13766#if 0
13767 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13768 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13769#endif
13770#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
13771 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13772 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13773 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13774#endif
13775#if 0 /* NT4SP1 - xadd early boot. */
13776 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13777#endif
13778#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13779 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13780#endif
13781#if 0 /* NT4SP1 - cmpxchg (AMD). */
13782 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13783#endif
13784#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13785 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13786#endif
13787#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13788 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13789
13790#endif
13791#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13792 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13793
13794#endif
13795#if 0 /* NT4SP1 - frstor [ecx] */
13796 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13797#endif
13798#if 0 /* xxxxxx - All long mode code. */
13799 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13800#endif
13801#if 0 /* rep movsq linux 3.7 64-bit boot. */
13802 || (pOrgCtx->rip == 0x0000000000100241)
13803#endif
13804#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13805 || (pOrgCtx->rip == 0x000000000215e240)
13806#endif
13807#if 0 /* DOS's size-overridden iret to v8086. */
13808 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13809#endif
13810 )
13811 )
13812 {
13813 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13814 RTLogFlags(NULL, "enabled");
13815 fNewNoRem = false;
13816 }
13817 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13818 {
13819 pVCpu->iem.s.fNoRem = fNewNoRem;
13820 if (!fNewNoRem)
13821 {
13822 LogAlways(("Enabling verification mode!\n"));
13823 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13824 }
13825 else
13826 LogAlways(("Disabling verification mode!\n"));
13827 }
13828
13829 /*
13830 * Switch state.
13831 */
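    /* The guest context is copied into a static debug context so that IEM
       executes on a private copy; iemExecVerificationModeCheck() later runs
       the same instruction on the original context in HM/REM and compares
       the two. */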
13832 if (IEM_VERIFICATION_ENABLED(pVCpu))
13833 {
13834 static CPUMCTX s_DebugCtx; /* Ugly! */
13835
13836 s_DebugCtx = *pOrgCtx;
13837 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13838 }
13839
13840 /*
13841 * See if there is an interrupt pending in TRPM and inject it if we can.
13842 */
13843 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13844 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
13845#if defined(VBOX_WITH_NESTED_HWVIRT)
13846 bool fIntrEnabled = pOrgCtx->hwvirt.svm.fGif;
13847 if (fIntrEnabled)
13848 {
13849 if (CPUMIsGuestInSvmNestedHwVirtMode(pOrgCtx))
13850 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pOrgCtx);
13851 else
13852 fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13853 }
13854#else
13855 bool fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13856#endif
13857 if ( fIntrEnabled
13858 && TRPMHasTrap(pVCpu)
13859 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13860 {
13861 uint8_t u8TrapNo;
13862 TRPMEVENT enmType;
13863 RTGCUINT uErrCode;
13864 RTGCPTR uCr2;
13865 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13866 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13867 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13868 TRPMResetTrap(pVCpu);
13869 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13870 }
13871
13872 /*
13873 * Reset the counters.
13874 */
13875 pVCpu->iem.s.cIOReads = 0;
13876 pVCpu->iem.s.cIOWrites = 0;
13877 pVCpu->iem.s.fIgnoreRaxRdx = false;
13878 pVCpu->iem.s.fOverlappingMovs = false;
13879 pVCpu->iem.s.fProblematicMemory = false;
13880 pVCpu->iem.s.fUndefinedEFlags = 0;
13881
13882 if (IEM_VERIFICATION_ENABLED(pVCpu))
13883 {
13884 /*
13885 * Free all verification records.
13886 */
13887 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13888 pVCpu->iem.s.pIemEvtRecHead = NULL;
13889 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
13890 do
13891 {
13892 while (pEvtRec)
13893 {
13894 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13895 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13896 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13897 pEvtRec = pNext;
13898 }
13899 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13900 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13901 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13902 } while (pEvtRec);
13903 }
13904}
13905
13906
13907/**
13908 * Allocate an event record.
13909 * @returns Pointer to a record.
13910 */
13911IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
13912{
13913 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13914 return NULL;
13915
13916 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
13917 if (pEvtRec)
13918 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
13919 else
13920 {
13921 if (!pVCpu->iem.s.ppIemEvtRecNext)
13922 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
13923
13924 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
13925 if (!pEvtRec)
13926 return NULL;
13927 }
13928 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
13929 pEvtRec->pNext = NULL;
13930 return pEvtRec;
13931}
13932
13933
13934/**
13935 * IOMMMIORead notification.
13936 */
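/* Note: the IEMNotify* callbacks below record I/O and MMIO accesses made while
   the other execution engine (HM/REM) re-runs the instruction.  They land on
   the "other" event list so iemExecVerificationModeCheck() can match them 1:1
   against the records IEM produced itself. */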
13937VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
13938{
13939 PVMCPU pVCpu = VMMGetCpu(pVM);
13940 if (!pVCpu)
13941 return;
13942 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13943 if (!pEvtRec)
13944 return;
13945 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
13946 pEvtRec->u.RamRead.GCPhys = GCPhys;
13947 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
13948 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13949 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13950}
13951
13952
13953/**
13954 * IOMMMIOWrite notification.
13955 */
13956VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
13957{
13958 PVMCPU pVCpu = VMMGetCpu(pVM);
13959 if (!pVCpu)
13960 return;
13961 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13962 if (!pEvtRec)
13963 return;
13964 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
13965 pEvtRec->u.RamWrite.GCPhys = GCPhys;
13966 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
13967 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
13968 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
13969 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
13970 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
13971 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13972 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13973}
13974
13975
13976/**
13977 * IOMIOPortRead notification.
13978 */
13979VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
13980{
13981 PVMCPU pVCpu = VMMGetCpu(pVM);
13982 if (!pVCpu)
13983 return;
13984 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13985 if (!pEvtRec)
13986 return;
13987 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13988 pEvtRec->u.IOPortRead.Port = Port;
13989 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13990 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13991 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13992}
13993
13994/**
13995 * IOMIOPortWrite notification.
13996 */
13997VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13998{
13999 PVMCPU pVCpu = VMMGetCpu(pVM);
14000 if (!pVCpu)
14001 return;
14002 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14003 if (!pEvtRec)
14004 return;
14005 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14006 pEvtRec->u.IOPortWrite.Port = Port;
14007 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14008 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14009 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14010 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14011}
14012
14013
14014VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
14015{
14016 PVMCPU pVCpu = VMMGetCpu(pVM);
14017 if (!pVCpu)
14018 return;
14019 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14020 if (!pEvtRec)
14021 return;
14022 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
14023 pEvtRec->u.IOPortStrRead.Port = Port;
14024 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
14025 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
14026 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14027 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14028}
14029
14030
14031VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
14032{
14033 PVMCPU pVCpu = VMMGetCpu(pVM);
14034 if (!pVCpu)
14035 return;
14036 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14037 if (!pEvtRec)
14038 return;
14039 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
14040 pEvtRec->u.IOPortStrWrite.Port = Port;
14041 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
14042 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
14043 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14044 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14045}
14046
14047
14048/**
14049 * Fakes and records an I/O port read.
14050 *
14051 * @returns VINF_SUCCESS.
14052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14053 * @param Port The I/O port.
14054 * @param pu32Value Where to store the fake value.
14055 * @param cbValue The size of the access.
14056 */
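/* Note: the faked value 0xcccccccc used below is deliberately recognizable;
   iemVerifyWriteRecord() uses this pattern together with the I/O read counter
   to avoid flagging INS instructions whose real input value we cannot know. */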
14057IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14058{
14059 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14060 if (pEvtRec)
14061 {
14062 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14063 pEvtRec->u.IOPortRead.Port = Port;
14064 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14065 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14066 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14067 }
14068 pVCpu->iem.s.cIOReads++;
14069 *pu32Value = 0xcccccccc;
14070 return VINF_SUCCESS;
14071}
14072
14073
14074/**
14075 * Fakes and records an I/O port write.
14076 *
14077 * @returns VINF_SUCCESS.
14078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14079 * @param Port The I/O port.
14080 * @param u32Value The value being written.
14081 * @param cbValue The size of the access.
14082 */
14083IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14084{
14085 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14086 if (pEvtRec)
14087 {
14088 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14089 pEvtRec->u.IOPortWrite.Port = Port;
14090 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14091 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14092 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14093 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14094 }
14095 pVCpu->iem.s.cIOWrites++;
14096 return VINF_SUCCESS;
14097}
14098
14099
14100/**
14101 * Used to add extra details about a stub case.
14102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14103 */
14104IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
14105{
14106 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14107 PVM pVM = pVCpu->CTX_SUFF(pVM);
14108
14109 char szRegs[4096];
14110 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
14111 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
14112 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
14113 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
14114 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
14115 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
14116 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
14117 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
14118 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
14119 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
14120 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
14121 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
14122 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
14123 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
14124 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
14125 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
14126 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
14127 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
14128 " efer=%016VR{efer}\n"
14129 " pat=%016VR{pat}\n"
14130 " sf_mask=%016VR{sf_mask}\n"
14131 "krnl_gs_base=%016VR{krnl_gs_base}\n"
14132 " lstar=%016VR{lstar}\n"
14133 " star=%016VR{star} cstar=%016VR{cstar}\n"
14134 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
14135 );
14136
14137 char szInstr1[256];
14138 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
14139 DBGF_DISAS_FLAGS_DEFAULT_MODE,
14140 szInstr1, sizeof(szInstr1), NULL);
14141 char szInstr2[256];
14142 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
14143 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14144 szInstr2, sizeof(szInstr2), NULL);
14145
14146 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
14147}
14148
14149
14150/**
14151 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
14152 * dump to the assertion info.
14153 *
14154 * @param pEvtRec The record to dump.
14155 */
14156IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
14157{
14158 switch (pEvtRec->enmEvent)
14159 {
14160 case IEMVERIFYEVENT_IOPORT_READ:
14161 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
14162 pEvtRec->u.IOPortRead.Port,
14163 pEvtRec->u.IOPortRead.cbValue);
14164 break;
14165 case IEMVERIFYEVENT_IOPORT_WRITE:
14166 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
14167 pEvtRec->u.IOPortWrite.Port,
14168 pEvtRec->u.IOPortWrite.cbValue,
14169 pEvtRec->u.IOPortWrite.u32Value);
14170 break;
14171 case IEMVERIFYEVENT_IOPORT_STR_READ:
14172 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
14173 pEvtRec->u.IOPortStrRead.Port,
14174 pEvtRec->u.IOPortStrRead.cbValue,
14175 pEvtRec->u.IOPortStrRead.cTransfers);
14176 break;
14177 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14178 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
14179 pEvtRec->u.IOPortStrWrite.Port,
14180 pEvtRec->u.IOPortStrWrite.cbValue,
14181 pEvtRec->u.IOPortStrWrite.cTransfers);
14182 break;
14183 case IEMVERIFYEVENT_RAM_READ:
14184 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
14185 pEvtRec->u.RamRead.GCPhys,
14186 pEvtRec->u.RamRead.cb);
14187 break;
14188 case IEMVERIFYEVENT_RAM_WRITE:
14189 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
14190 pEvtRec->u.RamWrite.GCPhys,
14191 pEvtRec->u.RamWrite.cb,
14192 (int)pEvtRec->u.RamWrite.cb,
14193 pEvtRec->u.RamWrite.ab);
14194 break;
14195 default:
14196 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
14197 break;
14198 }
14199}
14200
14201
14202/**
14203 * Raises an assertion on the specified records, showing the given message with
14204 * a record dump attached.
14205 *
14206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14207 * @param pEvtRec1 The first record.
14208 * @param pEvtRec2 The second record.
14209 * @param pszMsg The message explaining why we're asserting.
14210 */
14211IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
14212{
14213 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14214 iemVerifyAssertAddRecordDump(pEvtRec1);
14215 iemVerifyAssertAddRecordDump(pEvtRec2);
14216 iemVerifyAssertMsg2(pVCpu);
14217 RTAssertPanic();
14218}
14219
14220
14221/**
14222 * Raises an assertion on the specified record, showing the given message with
14223 * a record dump attached.
14224 *
14225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14226 * @param pEvtRec1 The first record.
14227 * @param pszMsg The message explaining why we're asserting.
14228 */
14229IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
14230{
14231 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14232 iemVerifyAssertAddRecordDump(pEvtRec);
14233 iemVerifyAssertMsg2(pVCpu);
14234 RTAssertPanic();
14235}
14236
14237
14238/**
14239 * Verifies a write record.
14240 *
14241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14242 * @param pEvtRec The write record.
14243 * @param fRem Set if REM was the other execution engine. If
14244 * clear, it was HM.
14245 */
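/* Note: a few known-benign differences are deliberately ignored below: INS
   targets filled with the faked 0xcc I/O pattern, writes hitting the legacy
   VGA/ROM and high BIOS ranges, and full 512 byte fxsave images (see the
   checks in the function body). */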
14246IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
14247{
14248 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
14249 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
14250 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
14251 if ( RT_FAILURE(rc)
14252 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
14253 {
14254 /* fend off ins */
14255 if ( !pVCpu->iem.s.cIOReads
14256 || pEvtRec->u.RamWrite.ab[0] != 0xcc
14257 || ( pEvtRec->u.RamWrite.cb != 1
14258 && pEvtRec->u.RamWrite.cb != 2
14259 && pEvtRec->u.RamWrite.cb != 4) )
14260 {
14261 /* fend off ROMs and MMIO */
14262 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
14263 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
14264 {
14265 /* fend off fxsave */
14266 if (pEvtRec->u.RamWrite.cb != 512)
14267 {
14268 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
14269 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14270 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
14271 RTAssertMsg2Add("%s: %.*Rhxs\n"
14272 "iem: %.*Rhxs\n",
14273 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
14274 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
14275 iemVerifyAssertAddRecordDump(pEvtRec);
14276 iemVerifyAssertMsg2(pVCpu);
14277 RTAssertPanic();
14278 }
14279 }
14280 }
14281 }
14282
14283}
14284
14285/**
14286 * Performs the post-execution verification checks.
14287 */
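/* Rough flow: restore the original guest context, re-run the instruction in
   HM when possible or fall back to REM, then compare the resulting register
   state and the recorded I/O/MMIO events against what IEM produced. */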
14288IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
14289{
14290 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14291 return rcStrictIem;
14292
14293 /*
14294 * Switch back the state.
14295 */
14296 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
14297 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
14298 Assert(pOrgCtx != pDebugCtx);
14299 IEM_GET_CTX(pVCpu) = pOrgCtx;
14300
14301 /*
14302 * Execute the instruction in REM.
14303 */
14304 bool fRem = false;
14305 PVM pVM = pVCpu->CTX_SUFF(pVM);
14306
14307 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
14308#ifdef IEM_VERIFICATION_MODE_FULL_HM
14309 if ( HMIsEnabled(pVM)
14310 && pVCpu->iem.s.cIOReads == 0
14311 && pVCpu->iem.s.cIOWrites == 0
14312 && !pVCpu->iem.s.fProblematicMemory)
14313 {
14314 uint64_t uStartRip = pOrgCtx->rip;
14315 unsigned iLoops = 0;
14316 do
14317 {
14318 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
14319 iLoops++;
14320 } while ( rc == VINF_SUCCESS
14321 || ( rc == VINF_EM_DBG_STEPPED
14322 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14323 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
14324 || ( pOrgCtx->rip != pDebugCtx->rip
14325 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
14326 && iLoops < 8) );
14327 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
14328 rc = VINF_SUCCESS;
14329 }
14330#endif
14331 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
14332 || rc == VINF_IOM_R3_IOPORT_READ
14333 || rc == VINF_IOM_R3_IOPORT_WRITE
14334 || rc == VINF_IOM_R3_MMIO_READ
14335 || rc == VINF_IOM_R3_MMIO_READ_WRITE
14336 || rc == VINF_IOM_R3_MMIO_WRITE
14337 || rc == VINF_CPUM_R3_MSR_READ
14338 || rc == VINF_CPUM_R3_MSR_WRITE
14339 || rc == VINF_EM_RESCHEDULE
14340 )
14341 {
14342 EMRemLock(pVM);
14343 rc = REMR3EmulateInstruction(pVM, pVCpu);
14344 AssertRC(rc);
14345 EMRemUnlock(pVM);
14346 fRem = true;
14347 }
14348
14349# if 1 /* Skip unimplemented instructions for now. */
14350 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14351 {
14352 IEM_GET_CTX(pVCpu) = pOrgCtx;
14353 if (rc == VINF_EM_DBG_STEPPED)
14354 return VINF_SUCCESS;
14355 return rc;
14356 }
14357# endif
14358
14359 /*
14360 * Compare the register states.
14361 */
14362 unsigned cDiffs = 0;
14363 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
14364 {
14365 //Log(("REM and IEM ends up with different registers!\n"));
14366 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
14367
14368# define CHECK_FIELD(a_Field) \
14369 do \
14370 { \
14371 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14372 { \
14373 switch (sizeof(pOrgCtx->a_Field)) \
14374 { \
14375 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14376 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14377 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14378 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14379 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14380 } \
14381 cDiffs++; \
14382 } \
14383 } while (0)
14384# define CHECK_XSTATE_FIELD(a_Field) \
14385 do \
14386 { \
14387 if (pOrgXState->a_Field != pDebugXState->a_Field) \
14388 { \
14389 switch (sizeof(pOrgXState->a_Field)) \
14390 { \
14391 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14392 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14393 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14394 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14395 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14396 } \
14397 cDiffs++; \
14398 } \
14399 } while (0)
14400
14401# define CHECK_BIT_FIELD(a_Field) \
14402 do \
14403 { \
14404 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14405 { \
14406 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
14407 cDiffs++; \
14408 } \
14409 } while (0)
14410
14411# define CHECK_SEL(a_Sel) \
14412 do \
14413 { \
14414 CHECK_FIELD(a_Sel.Sel); \
14415 CHECK_FIELD(a_Sel.Attr.u); \
14416 CHECK_FIELD(a_Sel.u64Base); \
14417 CHECK_FIELD(a_Sel.u32Limit); \
14418 CHECK_FIELD(a_Sel.fFlags); \
14419 } while (0)
14420
14421 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
14422 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
14423
14424#if 1 /* The recompiler doesn't update these the intel way. */
14425 if (fRem)
14426 {
14427 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
14428 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
14429 pOrgXState->x87.CS = pDebugXState->x87.CS;
14430 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
14431 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
14432 pOrgXState->x87.DS = pDebugXState->x87.DS;
14433 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
14434 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
14435 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
14436 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
14437 }
14438#endif
14439 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
14440 {
14441 RTAssertMsg2Weak(" the FPU state differs\n");
14442 cDiffs++;
14443 CHECK_XSTATE_FIELD(x87.FCW);
14444 CHECK_XSTATE_FIELD(x87.FSW);
14445 CHECK_XSTATE_FIELD(x87.FTW);
14446 CHECK_XSTATE_FIELD(x87.FOP);
14447 CHECK_XSTATE_FIELD(x87.FPUIP);
14448 CHECK_XSTATE_FIELD(x87.CS);
14449 CHECK_XSTATE_FIELD(x87.Rsrvd1);
14450 CHECK_XSTATE_FIELD(x87.FPUDP);
14451 CHECK_XSTATE_FIELD(x87.DS);
14452 CHECK_XSTATE_FIELD(x87.Rsrvd2);
14453 CHECK_XSTATE_FIELD(x87.MXCSR);
14454 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
14455 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
14456 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
14457 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
14458 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
14459 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
14460 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
14461 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
14462 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
14463 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
14464 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
14465 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
14466 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
14467 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
14468 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
14469 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
14470 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
14471 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
14472 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
14473 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
14474 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
14475 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
14476 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
14477 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
14478 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
14479 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
14480 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
14481 }
14482 CHECK_FIELD(rip);
14483 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
14484 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
14485 {
14486 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
14487 CHECK_BIT_FIELD(rflags.Bits.u1CF);
14488 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
14489 CHECK_BIT_FIELD(rflags.Bits.u1PF);
14490 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
14491 CHECK_BIT_FIELD(rflags.Bits.u1AF);
14492 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
14493 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
14494 CHECK_BIT_FIELD(rflags.Bits.u1SF);
14495 CHECK_BIT_FIELD(rflags.Bits.u1TF);
14496 CHECK_BIT_FIELD(rflags.Bits.u1IF);
14497 CHECK_BIT_FIELD(rflags.Bits.u1DF);
14498 CHECK_BIT_FIELD(rflags.Bits.u1OF);
14499 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
14500 CHECK_BIT_FIELD(rflags.Bits.u1NT);
14501 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
14502 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
14503 CHECK_BIT_FIELD(rflags.Bits.u1RF);
14504 CHECK_BIT_FIELD(rflags.Bits.u1VM);
14505 CHECK_BIT_FIELD(rflags.Bits.u1AC);
14506 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
14507 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
14508 CHECK_BIT_FIELD(rflags.Bits.u1ID);
14509 }
14510
14511 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
14512 CHECK_FIELD(rax);
14513 CHECK_FIELD(rcx);
14514 if (!pVCpu->iem.s.fIgnoreRaxRdx)
14515 CHECK_FIELD(rdx);
14516 CHECK_FIELD(rbx);
14517 CHECK_FIELD(rsp);
14518 CHECK_FIELD(rbp);
14519 CHECK_FIELD(rsi);
14520 CHECK_FIELD(rdi);
14521 CHECK_FIELD(r8);
14522 CHECK_FIELD(r9);
14523 CHECK_FIELD(r10);
14524 CHECK_FIELD(r11);
14525 CHECK_FIELD(r12);
14526 CHECK_FIELD(r13);
14527 CHECK_SEL(cs);
14528 CHECK_SEL(ss);
14529 CHECK_SEL(ds);
14530 CHECK_SEL(es);
14531 CHECK_SEL(fs);
14532 CHECK_SEL(gs);
14533 CHECK_FIELD(cr0);
14534
14535 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
14536 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
14537 /* Kludge #2: CR2 differs slightly on cross page boundary faults; we report the last address of the access
14538 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
14539 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14540 {
14541 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14542 { /* ignore */ }
14543 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14544 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14545 && fRem)
14546 { /* ignore */ }
14547 else
14548 CHECK_FIELD(cr2);
14549 }
14550 CHECK_FIELD(cr3);
14551 CHECK_FIELD(cr4);
14552 CHECK_FIELD(dr[0]);
14553 CHECK_FIELD(dr[1]);
14554 CHECK_FIELD(dr[2]);
14555 CHECK_FIELD(dr[3]);
14556 CHECK_FIELD(dr[6]);
14557 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14558 CHECK_FIELD(dr[7]);
14559 CHECK_FIELD(gdtr.cbGdt);
14560 CHECK_FIELD(gdtr.pGdt);
14561 CHECK_FIELD(idtr.cbIdt);
14562 CHECK_FIELD(idtr.pIdt);
14563 CHECK_SEL(ldtr);
14564 CHECK_SEL(tr);
14565 CHECK_FIELD(SysEnter.cs);
14566 CHECK_FIELD(SysEnter.eip);
14567 CHECK_FIELD(SysEnter.esp);
14568 CHECK_FIELD(msrEFER);
14569 CHECK_FIELD(msrSTAR);
14570 CHECK_FIELD(msrPAT);
14571 CHECK_FIELD(msrLSTAR);
14572 CHECK_FIELD(msrCSTAR);
14573 CHECK_FIELD(msrSFMASK);
14574 CHECK_FIELD(msrKERNELGSBASE);
14575
14576 if (cDiffs != 0)
14577 {
14578 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14579 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14580 RTAssertPanic();
14581 static bool volatile s_fEnterDebugger = true;
14582 if (s_fEnterDebugger)
14583 DBGFSTOP(pVM);
14584
14585# if 1 /* Ignore unimplemented instructions for now. */
14586 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14587 rcStrictIem = VINF_SUCCESS;
14588# endif
14589 }
14590# undef CHECK_FIELD
14591# undef CHECK_BIT_FIELD
14592 }
14593
14594 /*
14595 * If the register state compared fine, check the verification event
14596 * records.
14597 */
14598 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14599 {
14600 /*
14601 * Compare verification event records.
14602 * - I/O port accesses should be a 1:1 match.
14603 */
14604 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14605 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14606 while (pIemRec && pOtherRec)
14607 {
14608 /* Since we might miss RAM writes and reads, ignore reads and verify
14609 any extra IEM write records directly against guest memory. */
14610 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14611 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14612 && pIemRec->pNext)
14613 {
14614 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14615 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14616 pIemRec = pIemRec->pNext;
14617 }
14618
14619 /* Do the compare. */
14620 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14621 {
14622 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14623 break;
14624 }
14625 bool fEquals;
14626 switch (pIemRec->enmEvent)
14627 {
14628 case IEMVERIFYEVENT_IOPORT_READ:
14629 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14630 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14631 break;
14632 case IEMVERIFYEVENT_IOPORT_WRITE:
14633 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14634 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14635 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14636 break;
14637 case IEMVERIFYEVENT_IOPORT_STR_READ:
14638 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14639 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14640 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14641 break;
14642 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14643 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14644 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14645 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14646 break;
14647 case IEMVERIFYEVENT_RAM_READ:
14648 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14649 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14650 break;
14651 case IEMVERIFYEVENT_RAM_WRITE:
14652 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14653 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14654 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14655 break;
14656 default:
14657 fEquals = false;
14658 break;
14659 }
14660 if (!fEquals)
14661 {
14662 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14663 break;
14664 }
14665
14666 /* advance */
14667 pIemRec = pIemRec->pNext;
14668 pOtherRec = pOtherRec->pNext;
14669 }
14670
14671 /* Ignore extra writes and reads. */
14672 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14673 {
14674 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14675 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14676 pIemRec = pIemRec->pNext;
14677 }
14678 if (pIemRec != NULL)
14679 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14680 else if (pOtherRec != NULL)
14681 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14682 }
14683 IEM_GET_CTX(pVCpu) = pOrgCtx;
14684
14685 return rcStrictIem;
14686}
14687
14688#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14689
14690/* stubs */
14691IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14692{
14693 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14694 return VERR_INTERNAL_ERROR;
14695}
14696
14697IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14698{
14699 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14700 return VERR_INTERNAL_ERROR;
14701}
14702
14703#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14704
14705
14706#ifdef LOG_ENABLED
14707/**
14708 * Logs the current instruction.
14709 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14710 * @param pCtx The current CPU context.
14711 * @param fSameCtx Set if we have the same context information as the VMM,
14712 * clear if we may have already executed an instruction in
14713 * our debug context. When clear, we assume IEMCPU holds
14714 * valid CPU mode info.
14715 */
14716IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14717{
14718# ifdef IN_RING3
14719 if (LogIs2Enabled())
14720 {
14721 char szInstr[256];
14722 uint32_t cbInstr = 0;
14723 if (fSameCtx)
14724 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14725 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14726 szInstr, sizeof(szInstr), &cbInstr);
14727 else
14728 {
14729 uint32_t fFlags = 0;
14730 switch (pVCpu->iem.s.enmCpuMode)
14731 {
14732 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14733 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14734 case IEMMODE_16BIT:
14735 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14736 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14737 else
14738 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14739 break;
14740 }
14741 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14742 szInstr, sizeof(szInstr), &cbInstr);
14743 }
14744
14745 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14746 Log2(("****\n"
14747 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14748 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14749 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14750 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14751 " %s\n"
14752 ,
14753 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14754 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14755 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14756 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14757 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14758 szInstr));
14759
14760 if (LogIs3Enabled())
14761 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14762 }
14763 else
14764# endif
14765 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14766 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14767 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14768}
14769#endif
14770
14771
14772/**
14773 * Makes status code adjustments (pass up from I/O and access handler)
14774 * as well as maintaining statistics.
14775 *
14776 * @returns Strict VBox status code to pass up.
14777 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14778 * @param rcStrict The status from executing an instruction.
14779 */
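/* Note on the merging rule below: a pending informational status from an I/O
   or access handler (rcPassUp) replaces rcStrict when it is not a VINF_EM_*
   status or when it ranks higher (i.e. has a lower value) than rcStrict;
   otherwise rcStrict is kept and only the statistics are updated. */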
14780DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14781{
14782 if (rcStrict != VINF_SUCCESS)
14783 {
14784 if (RT_SUCCESS(rcStrict))
14785 {
14786 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14787 || rcStrict == VINF_IOM_R3_IOPORT_READ
14788 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14789 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14790 || rcStrict == VINF_IOM_R3_MMIO_READ
14791 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14792 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14793 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14794 || rcStrict == VINF_CPUM_R3_MSR_READ
14795 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14796 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14797 || rcStrict == VINF_EM_RAW_TO_R3
14798 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14799 || rcStrict == VINF_EM_TRIPLE_FAULT
14800 /* raw-mode / virt handlers only: */
14801 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14802 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14803 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14804 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14805 || rcStrict == VINF_SELM_SYNC_GDT
14806 || rcStrict == VINF_CSAM_PENDING_ACTION
14807 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14808 /* nested hw.virt codes: */
14809 || rcStrict == VINF_SVM_VMEXIT
14810 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14811/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
14812 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
14813#ifdef VBOX_WITH_NESTED_HWVIRT
14814 if ( rcStrict == VINF_SVM_VMEXIT
14815 && rcPassUp == VINF_SUCCESS)
14816 rcStrict = VINF_SUCCESS;
14817 else
14818#endif
14819 if (rcPassUp == VINF_SUCCESS)
14820 pVCpu->iem.s.cRetInfStatuses++;
14821 else if ( rcPassUp < VINF_EM_FIRST
14822 || rcPassUp > VINF_EM_LAST
14823 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14824 {
14825 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14826 pVCpu->iem.s.cRetPassUpStatus++;
14827 rcStrict = rcPassUp;
14828 }
14829 else
14830 {
14831 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14832 pVCpu->iem.s.cRetInfStatuses++;
14833 }
14834 }
14835 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14836 pVCpu->iem.s.cRetAspectNotImplemented++;
14837 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14838 pVCpu->iem.s.cRetInstrNotImplemented++;
14839#ifdef IEM_VERIFICATION_MODE_FULL
14840 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14841 rcStrict = VINF_SUCCESS;
14842#endif
14843 else
14844 pVCpu->iem.s.cRetErrStatuses++;
14845 }
14846 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14847 {
14848 pVCpu->iem.s.cRetPassUpStatus++;
14849 rcStrict = pVCpu->iem.s.rcPassUp;
14850 }
14851
14852 return rcStrict;
14853}
14854
14855
14856/**
14857 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14858 * IEMExecOneWithPrefetchedByPC.
14859 *
14860 * Similar code is found in IEMExecLots.
14861 *
14862 * @return Strict VBox status code.
14863 * @param pVCpu The cross context virtual CPU structure of the calling
14864 * thread.
14865 * @param fExecuteInhibit If set, execute the instruction following CLI,
14866 * POP SS and MOV SS,GR.
14867 */
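/* Note: the fExecuteInhibit path below relies on the interrupt inhibition
   shadow tracked by EM (VMCPU_FF_INHIBIT_INTERRUPTS plus the recorded RIP).
   When the just executed instruction set up such a shadow, the following
   instruction is decoded and executed immediately, after which the inhibit
   PC is reset to a dummy value. */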
14868DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14869{
14870#ifdef IEM_WITH_SETJMP
14871 VBOXSTRICTRC rcStrict;
14872 jmp_buf JmpBuf;
14873 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14874 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14875 if ((rcStrict = setjmp(JmpBuf)) == 0)
14876 {
14877 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14878 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14879 }
14880 else
14881 pVCpu->iem.s.cLongJumps++;
14882 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14883#else
14884 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14885 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14886#endif
14887 if (rcStrict == VINF_SUCCESS)
14888 pVCpu->iem.s.cInstructions++;
14889 if (pVCpu->iem.s.cActiveMappings > 0)
14890 {
14891 Assert(rcStrict != VINF_SUCCESS);
14892 iemMemRollback(pVCpu);
14893 }
14894//#ifdef DEBUG
14895// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14896//#endif
14897
14898 /* Execute the next instruction as well if a cli, pop ss or
14899 mov ss, Gr has just completed successfully. */
14900 if ( fExecuteInhibit
14901 && rcStrict == VINF_SUCCESS
14902 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14903 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
14904 {
14905 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14906 if (rcStrict == VINF_SUCCESS)
14907 {
14908#ifdef LOG_ENABLED
14909 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14910#endif
14911#ifdef IEM_WITH_SETJMP
14912 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14913 if ((rcStrict = setjmp(JmpBuf)) == 0)
14914 {
14915 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14916 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14917 }
14918 else
14919 pVCpu->iem.s.cLongJumps++;
14920 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14921#else
14922 IEM_OPCODE_GET_NEXT_U8(&b);
14923 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14924#endif
14925 if (rcStrict == VINF_SUCCESS)
14926 pVCpu->iem.s.cInstructions++;
14927 if (pVCpu->iem.s.cActiveMappings > 0)
14928 {
14929 Assert(rcStrict != VINF_SUCCESS);
14930 iemMemRollback(pVCpu);
14931 }
14932 }
14933 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14934 }
14935
14936 /*
14937 * Return value fiddling, statistics and sanity assertions.
14938 */
14939 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14940
14941 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14942 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14943#if defined(IEM_VERIFICATION_MODE_FULL)
14944 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14945 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14946 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14947 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14948#endif
14949 return rcStrict;
14950}
14951
14952
14953#ifdef IN_RC
14954/**
14955 * Re-enters raw-mode or ensure we return to ring-3.
14956 *
14957 * @returns rcStrict, maybe modified.
14958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14959 * @param pCtx The current CPU context.
14960 * @param rcStrict The status code returned by the interpreter.
14961 */
14962DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
14963{
14964 if ( !pVCpu->iem.s.fInPatchCode
14965 && ( rcStrict == VINF_SUCCESS
14966 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14967 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14968 {
14969 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14970 CPUMRawEnter(pVCpu);
14971 else
14972 {
14973 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14974 rcStrict = VINF_EM_RESCHEDULE;
14975 }
14976 }
14977 return rcStrict;
14978}
14979#endif
14980
14981
14982/**
14983 * Execute one instruction.
14984 *
14985 * @return Strict VBox status code.
14986 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14987 */
14988VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14989{
14990#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14991 if (++pVCpu->iem.s.cVerifyDepth == 1)
14992 iemExecVerificationModeSetup(pVCpu);
14993#endif
14994#ifdef LOG_ENABLED
14995 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14996 iemLogCurInstr(pVCpu, pCtx, true);
14997#endif
14998
14999 /*
15000 * Do the decoding and emulation.
15001 */
15002 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15003 if (rcStrict == VINF_SUCCESS)
15004 rcStrict = iemExecOneInner(pVCpu, true);
15005
15006#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15007 /*
15008 * Assert some sanity.
15009 */
15010 if (pVCpu->iem.s.cVerifyDepth == 1)
15011 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15012 pVCpu->iem.s.cVerifyDepth--;
15013#endif
15014#ifdef IN_RC
15015 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15016#endif
15017 if (rcStrict != VINF_SUCCESS)
15018 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15019 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15020 return rcStrict;
15021}
15022
15023
15024VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15025{
15026 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15027 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15028
15029 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15030 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15031 if (rcStrict == VINF_SUCCESS)
15032 {
15033 rcStrict = iemExecOneInner(pVCpu, true);
15034 if (pcbWritten)
15035 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15036 }
15037
15038#ifdef IN_RC
15039 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15040#endif
15041 return rcStrict;
15042}
15043
15044
15045VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15046 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15047{
15048 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15049 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15050
15051 VBOXSTRICTRC rcStrict;
15052 if ( cbOpcodeBytes
15053 && pCtx->rip == OpcodeBytesPC)
15054 {
15055 iemInitDecoder(pVCpu, false);
15056#ifdef IEM_WITH_CODE_TLB
15057 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15058 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15059 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15060 pVCpu->iem.s.offCurInstrStart = 0;
15061 pVCpu->iem.s.offInstrNextByte = 0;
15062#else
15063 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15064 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15065#endif
15066 rcStrict = VINF_SUCCESS;
15067 }
15068 else
15069 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15070 if (rcStrict == VINF_SUCCESS)
15071 {
15072 rcStrict = iemExecOneInner(pVCpu, true);
15073 }
15074
15075#ifdef IN_RC
15076 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15077#endif
15078 return rcStrict;
15079}
15080
15081
15082VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15083{
15084 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15085 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15086
15087 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15088 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15089 if (rcStrict == VINF_SUCCESS)
15090 {
15091 rcStrict = iemExecOneInner(pVCpu, false);
15092 if (pcbWritten)
15093 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15094 }
15095
15096#ifdef IN_RC
15097 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15098#endif
15099 return rcStrict;
15100}
15101
15102
15103VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15104 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15105{
15106 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15107 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15108
15109 VBOXSTRICTRC rcStrict;
15110 if ( cbOpcodeBytes
15111 && pCtx->rip == OpcodeBytesPC)
15112 {
15113 iemInitDecoder(pVCpu, true);
15114#ifdef IEM_WITH_CODE_TLB
15115 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15116 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15117 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15118 pVCpu->iem.s.offCurInstrStart = 0;
15119 pVCpu->iem.s.offInstrNextByte = 0;
15120#else
15121 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15122 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15123#endif
15124 rcStrict = VINF_SUCCESS;
15125 }
15126 else
15127 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15128 if (rcStrict == VINF_SUCCESS)
15129 rcStrict = iemExecOneInner(pVCpu, false);
15130
15131#ifdef IN_RC
15132 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15133#endif
15134 return rcStrict;
15135}
15136
15137
15138/**
15139 * For debugging DISGetParamSize, may come in handy.
15140 *
15141 * @returns Strict VBox status code.
15142 * @param pVCpu The cross context virtual CPU structure of the
15143 * calling EMT.
15144 * @param pCtxCore The context core structure.
15145 * @param OpcodeBytesPC The PC of the opcode bytes.
15146 * @param pvOpcodeBytes Prefetched opcode bytes.
15147 * @param cbOpcodeBytes Number of prefetched bytes.
15148 * @param pcbWritten Where to return the number of bytes written.
15149 * Optional.
15150 */
15151VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15152 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
15153 uint32_t *pcbWritten)
15154{
15155 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15156 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15157
15158 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15159 VBOXSTRICTRC rcStrict;
15160 if ( cbOpcodeBytes
15161 && pCtx->rip == OpcodeBytesPC)
15162 {
15163 iemInitDecoder(pVCpu, true);
15164#ifdef IEM_WITH_CODE_TLB
15165 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15166 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15167 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15168 pVCpu->iem.s.offCurInstrStart = 0;
15169 pVCpu->iem.s.offInstrNextByte = 0;
15170#else
15171 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15172 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15173#endif
15174 rcStrict = VINF_SUCCESS;
15175 }
15176 else
15177 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15178 if (rcStrict == VINF_SUCCESS)
15179 {
15180 rcStrict = iemExecOneInner(pVCpu, false);
15181 if (pcbWritten)
15182 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15183 }
15184
15185#ifdef IN_RC
15186 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15187#endif
15188 return rcStrict;
15189}
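
/* Purely illustrative sketch (hypothetical helper name and opcode bytes, not
   part of the build): hands IEM instruction bytes the caller has already
   fetched, so the decoder can skip the guest-memory prefetch while RIP still
   matches the supplied PC. */
#if 0
static VBOXSTRICTRC iemSketchExecPrefetchedCpuid(PVMCPU pVCpu)
{
    static uint8_t const s_abOpcodes[] = { 0x0f, 0xa2 }; /* cpuid */
    PCPUMCTX pCtx      = IEM_GET_CTX(pVCpu);
    uint32_t cbWritten = 0;
    return IEMExecOneBypassWithPrefetchedByPCWritten(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip,
                                                     s_abOpcodes, sizeof(s_abOpcodes), &cbWritten);
}
#endif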
15190
15191
15192VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
15193{
15194 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
15195
15196#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15197 /*
15198 * See if there is an interrupt pending in TRPM, inject it if we can.
15199 */
15200 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15201# ifdef IEM_VERIFICATION_MODE_FULL
15202 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15203# endif
15204
15205 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
15206#if defined(VBOX_WITH_NESTED_HWVIRT)
15207 bool fIntrEnabled = pCtx->hwvirt.svm.fGif;
15208 if (fIntrEnabled)
15209 {
15210 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15211 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15212 else
15213 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15214 }
15215#else
15216 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15217#endif
15218 if ( fIntrEnabled
15219 && TRPMHasTrap(pVCpu)
15220 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15221 {
15222 uint8_t u8TrapNo;
15223 TRPMEVENT enmType;
15224 RTGCUINT uErrCode;
15225 RTGCPTR uCr2;
15226 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15227 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15228 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15229 TRPMResetTrap(pVCpu);
15230 }
15231
15232 /*
15233 * Log the state.
15234 */
15235# ifdef LOG_ENABLED
15236 iemLogCurInstr(pVCpu, pCtx, true);
15237# endif
15238
15239 /*
15240 * Do the decoding and emulation.
15241 */
15242 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15243 if (rcStrict == VINF_SUCCESS)
15244 rcStrict = iemExecOneInner(pVCpu, true);
15245
15246 /*
15247 * Assert some sanity.
15248 */
15249 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15250
15251 /*
15252 * Log and return.
15253 */
15254 if (rcStrict != VINF_SUCCESS)
15255 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15256 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15257 if (pcInstructions)
15258 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15259 return rcStrict;
15260
15261#else /* Not verification mode */
15262
15263 /*
15264 * See if there is an interrupt pending in TRPM, inject it if we can.
15265 */
15266 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15267# ifdef IEM_VERIFICATION_MODE_FULL
15268 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15269# endif
15270
15271 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
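 /* With nested SVM hardware virtualization the global interrupt flag (GIF)
    gates interrupt delivery; inside a nested guest the SVM interception rules
    (CPUMCanSvmNstGstTakePhysIntr) decide whether a physical interrupt can be
    taken, otherwise plain EFLAGS.IF applies. */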
15272#if defined(VBOX_WITH_NESTED_HWVIRT)
15273 bool fIntrEnabled = pCtx->hwvirt.svm.fGif;
15274 if (fIntrEnabled)
15275 {
15276 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15277 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15278 else
15279 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15280 }
15281#else
15282 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15283#endif
15284 if ( fIntrEnabled
15285 && TRPMHasTrap(pVCpu)
15286 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15287 {
15288 uint8_t u8TrapNo;
15289 TRPMEVENT enmType;
15290 RTGCUINT uErrCode;
15291 RTGCPTR uCr2;
15292 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15293 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15294 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15295 TRPMResetTrap(pVCpu);
15296 }
15297
15298 /*
15299 * Initial decoder init w/ prefetch, then setup setjmp.
15300 */
15301 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15302 if (rcStrict == VINF_SUCCESS)
15303 {
15304# ifdef IEM_WITH_SETJMP
15305 jmp_buf JmpBuf;
15306 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
15307 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15308 pVCpu->iem.s.cActiveMappings = 0;
15309 if ((rcStrict = setjmp(JmpBuf)) == 0)
15310# endif
15311 {
15312 /*
15313 * The run loop. We limit ourselves to 4096 instructions right now.
15314 */
15315 PVM pVM = pVCpu->CTX_SUFF(pVM);
15316 uint32_t cInstr = 4096;
15317 for (;;)
15318 {
15319 /*
15320 * Log the state.
15321 */
15322# ifdef LOG_ENABLED
15323 iemLogCurInstr(pVCpu, pCtx, true);
15324# endif
15325
15326 /*
15327 * Do the decoding and emulation.
15328 */
15329 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15330 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15331 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15332 {
15333 Assert(pVCpu->iem.s.cActiveMappings == 0);
15334 pVCpu->iem.s.cInstructions++;
15335 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
15336 {
15337 uint32_t fCpu = pVCpu->fLocalForcedActions
15338 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
15339 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
15340 | VMCPU_FF_TLB_FLUSH
15341# ifdef VBOX_WITH_RAW_MODE
15342 | VMCPU_FF_TRPM_SYNC_IDT
15343 | VMCPU_FF_SELM_SYNC_TSS
15344 | VMCPU_FF_SELM_SYNC_GDT
15345 | VMCPU_FF_SELM_SYNC_LDT
15346# endif
15347 | VMCPU_FF_INHIBIT_INTERRUPTS
15348 | VMCPU_FF_BLOCK_NMIS
15349 | VMCPU_FF_UNHALT ));
15350
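 /* Keep iterating only while nothing needs servicing: either no relevant
    VCPU force-flags are pending, or only the APIC/PIC interrupt flags are
    pending while EFLAGS.IF is clear (so they cannot be delivered yet), and
    no VM-wide force-flags are set. */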
15351 if (RT_LIKELY( ( !fCpu
15352 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
15353 && !pCtx->rflags.Bits.u1IF) )
15354 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
15355 {
15356 if (cInstr-- > 0)
15357 {
15358 Assert(pVCpu->iem.s.cActiveMappings == 0);
15359 iemReInitDecoder(pVCpu);
15360 continue;
15361 }
15362 }
15363 }
15364 Assert(pVCpu->iem.s.cActiveMappings == 0);
15365 }
15366 else if (pVCpu->iem.s.cActiveMappings > 0)
15367 iemMemRollback(pVCpu);
15368 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15369 break;
15370 }
15371 }
15372# ifdef IEM_WITH_SETJMP
15373 else
15374 {
15375 if (pVCpu->iem.s.cActiveMappings > 0)
15376 iemMemRollback(pVCpu);
15377 pVCpu->iem.s.cLongJumps++;
15378# ifdef VBOX_WITH_NESTED_HWVIRT
15379 /*
15380 * When a nested-guest causes an exception intercept when fetching memory
15381 * (e.g. IEM_MC_FETCH_MEM_U16) as part of instruction execution, we need this
15382 * to fix-up VINF_SVM_VMEXIT on the longjmp way out, otherwise we will guru.
15383 */
15384 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15385# endif
15386 }
15387 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15388# endif
15389
15390 /*
15391 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15392 */
15393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15394 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15395# if defined(IEM_VERIFICATION_MODE_FULL)
15396 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15397 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15398 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15399 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15400# endif
15401 }
15402# ifdef VBOX_WITH_NESTED_HWVIRT
15403 else
15404 {
15405 /*
15406 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
15407 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
15408 */
15409 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15410 }
15411# endif
15412
15413 /*
15414 * Maybe re-enter raw-mode and log.
15415 */
15416# ifdef IN_RC
15417 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15418# endif
15419 if (rcStrict != VINF_SUCCESS)
15420 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15421 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15422 if (pcInstructions)
15423 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15424 return rcStrict;
15425#endif /* Not verification mode */
15426}
15427
15428
15429
15430/**
15431 * Injects a trap, fault, abort, software interrupt or external interrupt.
15432 *
15433 * The parameter list matches TRPMQueryTrapAll pretty closely.
15434 *
15435 * @returns Strict VBox status code.
15436 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15437 * @param u8TrapNo The trap number.
15438 * @param enmType What type is it (trap/fault/abort), software
15439 * interrupt or hardware interrupt.
15440 * @param uErrCode The error code if applicable.
15441 * @param uCr2 The CR2 value if applicable.
15442 * @param cbInstr The instruction length (only relevant for
15443 * software interrupts).
15444 */
15445VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
15446 uint8_t cbInstr)
15447{
15448 iemInitDecoder(pVCpu, false);
15449#ifdef DBGFTRACE_ENABLED
15450 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
15451 u8TrapNo, enmType, uErrCode, uCr2);
15452#endif
15453
15454 uint32_t fFlags;
15455 switch (enmType)
15456 {
15457 case TRPM_HARDWARE_INT:
15458 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
15459 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
15460 uErrCode = uCr2 = 0;
15461 break;
15462
15463 case TRPM_SOFTWARE_INT:
15464 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
15465 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
15466 uErrCode = uCr2 = 0;
15467 break;
15468
15469 case TRPM_TRAP:
15470 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
15471 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
15472 if (u8TrapNo == X86_XCPT_PF)
15473 fFlags |= IEM_XCPT_FLAGS_CR2;
15474 switch (u8TrapNo)
15475 {
15476 case X86_XCPT_DF:
15477 case X86_XCPT_TS:
15478 case X86_XCPT_NP:
15479 case X86_XCPT_SS:
15480 case X86_XCPT_PF:
15481 case X86_XCPT_AC:
15482 fFlags |= IEM_XCPT_FLAGS_ERR;
15483 break;
15484
15485 case X86_XCPT_NMI:
15486 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
15487 break;
15488 }
15489 break;
15490
15491 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15492 }
15493
15494 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15495}
15496
15497
15498/**
15499 * Injects the active TRPM event.
15500 *
15501 * @returns Strict VBox status code.
15502 * @param pVCpu The cross context virtual CPU structure.
15503 */
15504VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
15505{
15506#ifndef IEM_IMPLEMENTS_TASKSWITCH
15507 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15508#else
15509 uint8_t u8TrapNo;
15510 TRPMEVENT enmType;
15511 RTGCUINT uErrCode;
15512 RTGCUINTPTR uCr2;
15513 uint8_t cbInstr;
15514 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
15515 if (RT_FAILURE(rc))
15516 return rc;
15517
15518 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15519
15520 /** @todo Are there any other codes that imply the event was successfully
15521 * delivered to the guest? See @bugref{6607}. */
15522 if ( rcStrict == VINF_SUCCESS
15523 || rcStrict == VINF_IEM_RAISED_XCPT)
15524 {
15525 TRPMResetTrap(pVCpu);
15526 }
15527 return rcStrict;
15528#endif
15529}
15530
15531
15532VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15533{
15534 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15535 return VERR_NOT_IMPLEMENTED;
15536}
15537
15538
15539VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15540{
15541 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15542 return VERR_NOT_IMPLEMENTED;
15543}
15544
15545
15546#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15547/**
15548 * Executes an IRET instruction with the default operand size.
15549 *
15550 * This is for PATM.
15551 *
15552 * @returns VBox status code.
15553 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15554 * @param pCtxCore The register frame.
15555 */
15556VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
15557{
15558 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15559
15560 iemCtxCoreToCtx(pCtx, pCtxCore);
15561 iemInitDecoder(pVCpu);
15562 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15563 if (rcStrict == VINF_SUCCESS)
15564 iemCtxToCtxCore(pCtxCore, pCtx);
15565 else
15566 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15567 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15568 return rcStrict;
15569}
15570#endif
15571
15572
15573/**
15574 * Macro used by the IEMExec* methods to check the given instruction length.
15575 *
15576 * Will return on failure!
15577 *
15578 * @param a_cbInstr The given instruction length.
15579 * @param a_cbMin The minimum length.
15580 */
15581#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15582 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15583 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
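/* The single unsigned comparison above checks a_cbMin <= a_cbInstr <= 15 (the
   architectural maximum instruction length): any a_cbInstr below a_cbMin wraps
   around to a huge unsigned value and fails.  E.g. with a_cbMin = 2: cbInstr 1
   gives 0xffffffff > 13 (reject), cbInstr 2 gives 0 <= 13 (accept), cbInstr 15
   gives 13 <= 13 (accept), and cbInstr 16 gives 14 > 13 (reject). */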
15584
15585
15586/**
15587 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15588 *
15589 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15590 *
15591 * @returns Fiddled strict VBox status code, ready to return to the non-IEM caller.
15592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15593 * @param rcStrict The status code to fiddle.
15594 */
15595DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15596{
15597 iemUninitExec(pVCpu);
15598#ifdef IN_RC
15599 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15600 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15601#else
15602 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15603#endif
15604}
15605
15606
15607/**
15608 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15609 *
15610 * This API ASSUMES that the caller has already verified that the guest code is
15611 * allowed to access the I/O port. (The I/O port is in the DX register in the
15612 * guest state.)
15613 *
15614 * @returns Strict VBox status code.
15615 * @param pVCpu The cross context virtual CPU structure.
15616 * @param cbValue The size of the I/O port access (1, 2, or 4).
15617 * @param enmAddrMode The addressing mode.
15618 * @param fRepPrefix Indicates whether a repeat prefix is used
15619 * (doesn't matter which for this instruction).
15620 * @param cbInstr The instruction length in bytes.
15621 * @param iEffSeg The effective segment register index.
15622 * @param fIoChecked Whether the access to the I/O port has been
15623 * checked or not. It's typically checked in the
15624 * HM scenario.
15625 */
15626VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15627 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15628{
15629 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15630 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15631
15632 /*
15633 * State init.
15634 */
15635 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15636
15637 /*
15638 * Switch orgy for getting to the right handler.
15639 */
15640 VBOXSTRICTRC rcStrict;
15641 if (fRepPrefix)
15642 {
15643 switch (enmAddrMode)
15644 {
15645 case IEMMODE_16BIT:
15646 switch (cbValue)
15647 {
15648 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15649 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15650 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15651 default:
15652 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15653 }
15654 break;
15655
15656 case IEMMODE_32BIT:
15657 switch (cbValue)
15658 {
15659 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15660 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15661 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15662 default:
15663 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15664 }
15665 break;
15666
15667 case IEMMODE_64BIT:
15668 switch (cbValue)
15669 {
15670 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15671 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15672 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15673 default:
15674 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15675 }
15676 break;
15677
15678 default:
15679 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15680 }
15681 }
15682 else
15683 {
15684 switch (enmAddrMode)
15685 {
15686 case IEMMODE_16BIT:
15687 switch (cbValue)
15688 {
15689 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15690 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15691 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15692 default:
15693 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15694 }
15695 break;
15696
15697 case IEMMODE_32BIT:
15698 switch (cbValue)
15699 {
15700 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15701 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15702 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15703 default:
15704 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15705 }
15706 break;
15707
15708 case IEMMODE_64BIT:
15709 switch (cbValue)
15710 {
15711 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15712 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15713 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15714 default:
15715 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15716 }
15717 break;
15718
15719 default:
15720 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15721 }
15722 }
15723
15724 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15725}
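
/* Minimal illustrative sketch (hypothetical helper name, assumed parameter
   values, not part of the build): a VM-exit handler that has already validated
   the I/O port access could forward a byte-sized "rep outsb" like this; the
   operand size, address mode and segment are assumptions for the example. */
#if 0
static VBOXSTRICTRC iemSketchForwardRepOutsb(PVMCPU pVCpu)
{
    return IEMExecStringIoWrite(pVCpu,
                                1             /* cbValue: byte access */,
                                IEMMODE_32BIT /* enmAddrMode */,
                                true          /* fRepPrefix */,
                                2             /* cbInstr: 0xf3 0x6e */,
                                X86_SREG_DS   /* iEffSeg */,
                                true          /* fIoChecked */);
}
#endif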
15726
15727
15728/**
15729 * Interface for HM and EM for executing string I/O IN (read) instructions.
15730 *
15731 * This API ASSUMES that the caller has already verified that the guest code is
15732 * allowed to access the I/O port. (The I/O port is in the DX register in the
15733 * guest state.)
15734 *
15735 * @returns Strict VBox status code.
15736 * @param pVCpu The cross context virtual CPU structure.
15737 * @param cbValue The size of the I/O port access (1, 2, or 4).
15738 * @param enmAddrMode The addressing mode.
15739 * @param fRepPrefix Indicates whether a repeat prefix is used
15740 * (doesn't matter which for this instruction).
15741 * @param cbInstr The instruction length in bytes.
15742 * @param fIoChecked Whether the access to the I/O port has been
15743 * checked or not. It's typically checked in the
15744 * HM scenario.
15745 */
15746VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15747 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15748{
15749 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15750
15751 /*
15752 * State init.
15753 */
15754 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15755
15756 /*
15757 * Switch orgy for getting to the right handler.
15758 */
15759 VBOXSTRICTRC rcStrict;
15760 if (fRepPrefix)
15761 {
15762 switch (enmAddrMode)
15763 {
15764 case IEMMODE_16BIT:
15765 switch (cbValue)
15766 {
15767 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15768 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15769 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15770 default:
15771 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15772 }
15773 break;
15774
15775 case IEMMODE_32BIT:
15776 switch (cbValue)
15777 {
15778 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15779 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15780 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15781 default:
15782 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15783 }
15784 break;
15785
15786 case IEMMODE_64BIT:
15787 switch (cbValue)
15788 {
15789 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15790 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15791 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15792 default:
15793 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15794 }
15795 break;
15796
15797 default:
15798 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15799 }
15800 }
15801 else
15802 {
15803 switch (enmAddrMode)
15804 {
15805 case IEMMODE_16BIT:
15806 switch (cbValue)
15807 {
15808 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15809 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15810 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15811 default:
15812 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15813 }
15814 break;
15815
15816 case IEMMODE_32BIT:
15817 switch (cbValue)
15818 {
15819 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15820 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15821 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15822 default:
15823 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15824 }
15825 break;
15826
15827 case IEMMODE_64BIT:
15828 switch (cbValue)
15829 {
15830 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15831 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15832 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15833 default:
15834 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15835 }
15836 break;
15837
15838 default:
15839 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15840 }
15841 }
15842
15843 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15844}
15845
15846
15847/**
15848 * Interface for raw-mode to execute an OUT instruction.
15849 *
15850 * @returns Strict VBox status code.
15851 * @param pVCpu The cross context virtual CPU structure.
15852 * @param cbInstr The instruction length in bytes.
15853 * @param u16Port The port to write to.
15854 * @param cbReg The register size.
15855 *
15856 * @remarks In ring-0 not all of the state needs to be synced in.
15857 */
15858VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15859{
15860 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15861 Assert(cbReg <= 4 && cbReg != 3);
15862
15863 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15864 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15865 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15866}
15867
15868
15869/**
15870 * Interface for raw-mode to execute an IN instruction.
15871 *
15872 * @returns Strict VBox status code.
15873 * @param pVCpu The cross context virtual CPU structure.
15874 * @param cbInstr The instruction length in bytes.
15875 * @param u16Port The port to read.
15876 * @param cbReg The register size.
15877 */
15878VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15879{
15880 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15881 Assert(cbReg <= 4 && cbReg != 3);
15882
15883 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15884 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15885 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15886}
15887
15888
15889/**
15890 * Interface for HM and EM to write to a CRx register.
15891 *
15892 * @returns Strict VBox status code.
15893 * @param pVCpu The cross context virtual CPU structure.
15894 * @param cbInstr The instruction length in bytes.
15895 * @param iCrReg The control register number (destination).
15896 * @param iGReg The general purpose register number (source).
15897 *
15898 * @remarks In ring-0 not all of the state needs to be synced in.
15899 */
15900VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15901{
15902 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15903 Assert(iCrReg < 16);
15904 Assert(iGReg < 16);
15905
15906 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15907 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15908 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15909}
15910
15911
15912/**
15913 * Interface for HM and EM to read from a CRx register.
15914 *
15915 * @returns Strict VBox status code.
15916 * @param pVCpu The cross context virtual CPU structure.
15917 * @param cbInstr The instruction length in bytes.
15918 * @param iGReg The general purpose register number (destination).
15919 * @param iCrReg The control register number (source).
15920 *
15921 * @remarks In ring-0 not all of the state needs to be synced in.
15922 */
15923VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15924{
15925 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15926 Assert(iCrReg < 16);
15927 Assert(iGReg < 16);
15928
15929 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15930 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15931 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15932}
15933
15934
15935/**
15936 * Interface for HM and EM to clear the CR0[TS] bit.
15937 *
15938 * @returns Strict VBox status code.
15939 * @param pVCpu The cross context virtual CPU structure.
15940 * @param cbInstr The instruction length in bytes.
15941 *
15942 * @remarks In ring-0 not all of the state needs to be synced in.
15943 */
15944VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15945{
15946 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15947
15948 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15949 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15950 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15951}
15952
15953
15954/**
15955 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15956 *
15957 * @returns Strict VBox status code.
15958 * @param pVCpu The cross context virtual CPU structure.
15959 * @param cbInstr The instruction length in bytes.
15960 * @param uValue The value to load into CR0.
15961 *
15962 * @remarks In ring-0 not all of the state needs to be synced in.
15963 */
15964VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
15965{
15966 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15967
15968 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15969 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
15970 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15971}
15972
15973
15974/**
15975 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15976 *
15977 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15978 *
15979 * @returns Strict VBox status code.
15980 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15981 * @param cbInstr The instruction length in bytes.
15982 * @remarks In ring-0 not all of the state needs to be synced in.
15983 * @thread EMT(pVCpu)
15984 */
15985VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15986{
15987 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15988
15989 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15990 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15991 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15992}
15993
15994
15995/**
15996 * Interface for HM and EM to emulate the INVLPG instruction.
15997 *
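 * @returns Strict VBox status code.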
15998 * @param pVCpu The cross context virtual CPU structure.
15999 * @param cbInstr The instruction length in bytes.
16000 * @param GCPtrPage The effective address of the page to invalidate.
16001 *
16002 * @remarks In ring-0 not all of the state needs to be synced in.
16003 */
16004VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
16005{
16006 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16007
16008 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16009 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
16010 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16011}
16012
16013
16014/**
16015 * Checks if IEM is in the process of delivering an event (interrupt or
16016 * exception).
16017 *
16018 * @returns true if we're in the process of raising an interrupt or exception,
16019 * false otherwise.
16020 * @param pVCpu The cross context virtual CPU structure.
16021 * @param puVector Where to store the vector associated with the
16022 * currently delivered event, optional.
16023 * @param pfFlags Where to store the event delivery flags (see
16024 * IEM_XCPT_FLAGS_XXX), optional.
16025 * @param puErr Where to store the error code associated with the
16026 * event, optional.
16027 * @param puCr2 Where to store the CR2 associated with the event,
16028 * optional.
16029 * @remarks The caller should check the flags to determine if the error code and
16030 * CR2 are valid for the event.
16031 */
16032VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
16033{
16034 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
16035 if (fRaisingXcpt)
16036 {
16037 if (puVector)
16038 *puVector = pVCpu->iem.s.uCurXcpt;
16039 if (pfFlags)
16040 *pfFlags = pVCpu->iem.s.fCurXcpt;
16041 if (puErr)
16042 *puErr = pVCpu->iem.s.uCurXcptErr;
16043 if (puCr2)
16044 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
16045 }
16046 return fRaisingXcpt;
16047}
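
/* Illustrative sketch (hypothetical helper name, not part of the build): a
   caller should consult the returned flags before trusting the error code or
   CR2 values. */
#if 0
static void iemSketchLogCurrentXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        Log(("Delivering vector %#x (fFlags=%#x)\n", uVector, fFlags));
        if (fFlags & IEM_XCPT_FLAGS_ERR)    /* error code only valid when this is set */
            Log(("  uErr=%#x\n", uErr));
        if (fFlags & IEM_XCPT_FLAGS_CR2)    /* CR2 only valid when this is set (page faults) */
            Log(("  uCr2=%#RX64\n", uCr2));
    }
}
#endif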
16048
16049#ifdef VBOX_WITH_NESTED_HWVIRT
16050/**
16051 * Interface for HM and EM to emulate the CLGI instruction.
16052 *
16053 * @returns Strict VBox status code.
16054 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16055 * @param cbInstr The instruction length in bytes.
16056 * @thread EMT(pVCpu)
16057 */
16058VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
16059{
16060 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16061
16062 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16063 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
16064 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16065}
16066
16067
16068/**
16069 * Interface for HM and EM to emulate the STGI instruction.
16070 *
16071 * @returns Strict VBox status code.
16072 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16073 * @param cbInstr The instruction length in bytes.
16074 * @thread EMT(pVCpu)
16075 */
16076VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
16077{
16078 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16079
16080 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16081 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
16082 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16083}
16084
16085
16086/**
16087 * Interface for HM and EM to emulate the VMLOAD instruction.
16088 *
16089 * @returns Strict VBox status code.
16090 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16091 * @param cbInstr The instruction length in bytes.
16092 * @thread EMT(pVCpu)
16093 */
16094VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
16095{
16096 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16097
16098 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16099 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
16100 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16101}
16102
16103
16104/**
16105 * Interface for HM and EM to emulate the VMSAVE instruction.
16106 *
16107 * @returns Strict VBox status code.
16108 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16109 * @param cbInstr The instruction length in bytes.
16110 * @thread EMT(pVCpu)
16111 */
16112VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
16113{
16114 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16115
16116 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16117 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
16118 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16119}
16120
16121
16122/**
16123 * Interface for HM and EM to emulate the INVLPGA instruction.
16124 *
16125 * @returns Strict VBox status code.
16126 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16127 * @param cbInstr The instruction length in bytes.
16128 * @thread EMT(pVCpu)
16129 */
16130VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
16131{
16132 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16133
16134 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16135 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
16136 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16137}
16138
16139
16140/**
16141 * Interface for HM and EM to emulate the VMRUN instruction.
16142 *
16143 * @returns Strict VBox status code.
16144 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16145 * @param cbInstr The instruction length in bytes.
16146 * @thread EMT(pVCpu)
16147 */
16148VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
16149{
16150 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16151
16152 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16153 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
16154 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16155}
16156
16157
16158/**
16159 * Interface for HM and EM to emulate \#VMEXIT.
16160 *
16161 * @returns Strict VBox status code.
16162 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16163 * @param uExitCode The exit code.
16164 * @param uExitInfo1 The exit info. 1 field.
16165 * @param uExitInfo2 The exit info. 2 field.
16166 * @thread EMT(pVCpu)
16167 */
16168VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
16169{
16170 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, IEM_GET_CTX(pVCpu), uExitCode, uExitInfo1, uExitInfo2);
16171 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16172}
16173#endif /* VBOX_WITH_NESTED_HWVIRT */
16174
16175#ifdef IN_RING3
16176
16177/**
16178 * Handles the unlikely and probably fatal merge cases.
16179 *
16180 * @returns Merged status code.
16181 * @param rcStrict Current EM status code.
16182 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16183 * with @a rcStrict.
16184 * @param iMemMap The memory mapping index. For error reporting only.
16185 * @param pVCpu The cross context virtual CPU structure of the calling
16186 * thread, for error reporting only.
16187 */
16188DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16189 unsigned iMemMap, PVMCPU pVCpu)
16190{
16191 if (RT_FAILURE_NP(rcStrict))
16192 return rcStrict;
16193
16194 if (RT_FAILURE_NP(rcStrictCommit))
16195 return rcStrictCommit;
16196
16197 if (rcStrict == rcStrictCommit)
16198 return rcStrictCommit;
16199
16200 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16201 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16202 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16203 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16204 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16205 return VERR_IOM_FF_STATUS_IPE;
16206}
16207
16208
16209/**
16210 * Helper for IOMR3ProcessForceFlag.
16211 *
16212 * @returns Merged status code.
16213 * @param rcStrict Current EM status code.
16214 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16215 * with @a rcStrict.
16216 * @param iMemMap The memory mapping index. For error reporting only.
16217 * @param pVCpu The cross context virtual CPU structure of the calling
16218 * thread, for error reporting only.
16219 */
16220DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16221{
16222 /* Simple. */
16223 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16224 return rcStrictCommit;
16225
16226 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16227 return rcStrict;
16228
16229 /* EM scheduling status codes. */
16230 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16231 && rcStrict <= VINF_EM_LAST))
16232 {
16233 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16234 && rcStrictCommit <= VINF_EM_LAST))
16235 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16236 }
16237
16238 /* Unlikely */
16239 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16240}
16241
16242
16243/**
16244 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16245 *
16246 * @returns Merge between @a rcStrict and what the commit operation returned.
16247 * @param pVM The cross context VM structure.
16248 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16249 * @param rcStrict The status code returned by ring-0 or raw-mode.
16250 */
16251VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16252{
16253 /*
16254 * Reset the pending commit.
16255 */
16256 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16257 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16258 ("%#x %#x %#x\n",
16259 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16260 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16261
16262 /*
16263 * Commit the pending bounce buffers (usually just one).
16264 */
16265 unsigned cBufs = 0;
16266 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16267 while (iMemMap-- > 0)
16268 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16269 {
16270 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16271 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16272 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16273
16274 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16275 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16276 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16277
16278 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16279 {
16280 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16281 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16282 pbBuf,
16283 cbFirst,
16284 PGMACCESSORIGIN_IEM);
16285 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16286 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16287 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16288 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16289 }
16290
16291 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16292 {
16293 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16294 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16295 pbBuf + cbFirst,
16296 cbSecond,
16297 PGMACCESSORIGIN_IEM);
16298 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16299 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16300 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16301 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16302 }
16303 cBufs++;
16304 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16305 }
16306
16307 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16308 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16309 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16310 pVCpu->iem.s.cActiveMappings = 0;
16311 return rcStrict;
16312}
16313
16314#endif /* IN_RING3 */
16315