VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 71417

Last change on this file since 71417 was 71092, checked in by vboxsync, 7 years ago

VMM/IEM: Nested Hw.virt: Implement SVM decode-assist and NRIP feature.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 639.0 KB
1/* $Id: IEMAll.cpp 71092 2018-02-22 09:14:46Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/em.h>
106# include <VBox/vmm/hm_svm.h>
107#endif
108#include <VBox/vmm/tm.h>
109#include <VBox/vmm/dbgf.h>
110#include <VBox/vmm/dbgftrace.h>
111#ifdef VBOX_WITH_RAW_MODE_NOT_R0
112# include <VBox/vmm/patm.h>
113# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
114# include <VBox/vmm/csam.h>
115# endif
116#endif
117#include "IEMInternal.h"
118#ifdef IEM_VERIFICATION_MODE_FULL
119# include <VBox/vmm/rem.h>
120# include <VBox/vmm/mm.h>
121#endif
122#include <VBox/vmm/vm.h>
123#include <VBox/log.h>
124#include <VBox/err.h>
125#include <VBox/param.h>
126#include <VBox/dis.h>
127#include <VBox/disopcode.h>
128#include <iprt/assert.h>
129#include <iprt/string.h>
130#include <iprt/x86.h>
131
132
133/*********************************************************************************************************************************
134* Structures and Typedefs *
135*********************************************************************************************************************************/
136/** @typedef PFNIEMOP
137 * Pointer to an opcode decoder function.
138 */
139
140/** @def FNIEMOP_DEF
141 * Define an opcode decoder function.
142 *
143 * We're using macros for this so that adding and removing parameters as well as
144 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
145 *
146 * @param a_Name The function name.
147 */
148
149/** @typedef PFNIEMOPRM
150 * Pointer to an opcode decoder function with RM byte.
151 */
152
153/** @def FNIEMOPRM_DEF
154 * Define an opcode decoder function with RM byte.
155 *
156 * We're using macros for this so that adding and removing parameters as well as
157 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
158 *
159 * @param a_Name The function name.
160 */
161
162#if defined(__GNUC__) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
164typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
171
172#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
174typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
181
182#elif defined(__GNUC__)
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
191
192#else
193typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
194typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
195# define FNIEMOP_DEF(a_Name) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
197# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
198 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
199# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
200 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
201
202#endif
203#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
204
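/*
 * Illustrative sketch, not part of the original sources: how FNIEMOP_DEF and
 * FNIEMOP_CALL are meant to pair up.  The decoder name iemOp_ExampleUd and the
 * bOpcode variable are hypothetical; the macros are the ones defined above.
 *
 *     FNIEMOP_DEF(iemOp_ExampleUd)
 *     {
 *         // Decode nothing and report the aspect as not implemented.
 *         IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
 *     }
 *
 *     // Dispatching from an opcode table entry:
 *     VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]);
 */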
205
206/**
207 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
208 */
209typedef union IEMSELDESC
210{
211 /** The legacy view. */
212 X86DESC Legacy;
213 /** The long mode view. */
214 X86DESC64 Long;
215} IEMSELDESC;
216/** Pointer to a selector descriptor table entry. */
217typedef IEMSELDESC *PIEMSELDESC;
218
219/**
220 * CPU exception classes.
221 */
222typedef enum IEMXCPTCLASS
223{
224 IEMXCPTCLASS_BENIGN,
225 IEMXCPTCLASS_CONTRIBUTORY,
226 IEMXCPTCLASS_PAGE_FAULT,
227 IEMXCPTCLASS_DOUBLE_FAULT
228} IEMXCPTCLASS;
229
230
231/*********************************************************************************************************************************
232* Defined Constants And Macros *
233*********************************************************************************************************************************/
234/** @def IEM_WITH_SETJMP
235 * Enables alternative status code handling using setjmps.
236 *
237 * This adds a bit of expense via the setjmp() call since it saves all the
238 * non-volatile registers. However, it eliminates return code checks and allows
239 * for more optimal return value passing (return regs instead of stack buffer).
240 */
241#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
242# define IEM_WITH_SETJMP
243#endif
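/*
 * Illustrative sketch, not part of the original sources: when IEM_WITH_SETJMP
 * is active, the *Jmp raise helpers (e.g. iemRaisePageFaultJmp further down)
 * do not return but longjmp back to the top-level dispatcher, so fetch paths
 * can drop the per-call status checks.  Conceptually (the Jmp helper name and
 * the iSegReg/GCPtrMem locals are assumptions following this file's naming):
 *
 *     // Status-code style:
 *     uint32_t u32;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32, iSegReg, GCPtrMem);
 *     if (rcStrict != VINF_SUCCESS) return rcStrict;
 *
 *     // Setjmp style: failures are raised via longjmp, no status to check.
 *     uint32_t u32 = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
 */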
244
245/** Temporary hack to disable the double execution. Will be removed in favor
246 * of a dedicated execution mode in EM. */
247//#define IEM_VERIFICATION_MODE_NO_REM
248
249/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
250 * due to GCC lacking knowledge about the value range of a switch. */
251#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
252
253/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
254#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
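/*
 * Illustrative usage sketch, not part of the original sources (enmExampleMode
 * and cbStep are hypothetical variables):
 *
 *     switch (enmExampleMode)
 *     {
 *         case IEMMODE_16BIT: cbStep = 2; break;
 *         case IEMMODE_32BIT: cbStep = 4; break;
 *         case IEMMODE_64BIT: cbStep = 8; break;
 *         IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *     }
 */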
255
256/**
257 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
258 * occasion.
259 */
260#ifdef LOG_ENABLED
261# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
262 do { \
263 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
264 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
265 } while (0)
266#else
267# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
269#endif
270
271/**
272 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
273 * occasion using the supplied logger statement.
274 *
275 * @param a_LoggerArgs What to log on failure.
276 */
277#ifdef LOG_ENABLED
278# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
279 do { \
280 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
281 /*LogFunc(a_LoggerArgs);*/ \
282 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
283 } while (0)
284#else
285# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
286 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
287#endif
288
289/**
290 * Call an opcode decoder function.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF.
294 */
295#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
296
297/**
298 * Call a common opcode decoder function taking one extra argument.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_1.
302 */
303#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
304
305/**
306 * Call a common opcode decoder function taking two extra arguments.
307 *
308 * We're using macros for this so that adding and removing parameters can be
309 * done as we please. See FNIEMOP_DEF_1.
310 */
311#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
312
313/**
314 * Check if we're currently executing in real or virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The IEM state of the current CPU.
318 */
319#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in virtual 8086 mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in long mode.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored if not 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
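/*
 * Worked example (added for illustration): a VEX.VVVV value of 12 (0b1100)
 * selects register 12 in 64-bit mode, but only register 4 (0b100) outside of
 * 64-bit code, where the 4th bit is masked off by the expression above.
 */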
383
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
389
390#ifdef VBOX_WITH_NESTED_HWVIRT
391/**
392 * Check the common SVM instruction preconditions.
393 */
394# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
395 do { \
396 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
397 { \
398 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
399 return iemRaiseUndefinedOpcode(pVCpu); \
400 } \
401 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
402 { \
403 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
404 return iemRaiseUndefinedOpcode(pVCpu); \
405 } \
406 if (pVCpu->iem.s.uCpl != 0) \
407 { \
408 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
409 return iemRaiseGeneralProtectionFault0(pVCpu); \
410 } \
411 } while (0)
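/*
 * Illustrative sketch, not part of the original sources: an SVM instruction
 * implementation would typically open with these checks, e.g. (body heavily
 * abridged, see the SVM instruction implementations for the real thing):
 *
 *     IEM_CIMPL_DEF_0(iemCImpl_vmrun)
 *     {
 *         IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);
 *         ...
 *     }
 */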
412
413/**
414 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
415 */
416# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
417 do { \
418 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
419 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
420 } while (0)
421
422/**
423 * Check if SVM is enabled.
424 */
425# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
426
427/**
428 * Check if an SVM control/instruction intercept is set.
429 */
430# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
431
432/**
433 * Check if an SVM read CRx intercept is set.
434 */
435# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
436
437/**
438 * Check if an SVM write CRx intercept is set.
439 */
440# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
441
442/**
443 * Check if an SVM read DRx intercept is set.
444 */
445# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
446
447/**
448 * Check if an SVM write DRx intercept is set.
449 */
450# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
451
452/**
453 * Check if an SVM exception intercept is set.
454 */
455# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
456
457/**
458 * Invokes the SVM \#VMEXIT handler for the nested-guest.
459 */
460# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
461 do \
462 { \
463 return iemSvmVmexit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
464 } while (0)
465
466/**
467 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
468 * corresponding decode assist information.
469 */
470# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
471 do \
472 { \
473 uint64_t uExitInfo1; \
474 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
475 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
476 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
477 else \
478 uExitInfo1 = 0; \
479 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
480 } while (0)
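/*
 * Illustrative sketch, not part of the original sources: delivering a MOV CR0
 * write intercept with decode-assist info would look roughly like this (the
 * iGReg variable is hypothetical; SVM_EXIT_WRITE_CR0 comes from hm_svm.h):
 *
 *     if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, 0))
 *         IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR0, IEMACCESSCRX_MOV_CRX, iGReg);
 */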
481
482#else
483# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
484# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
485# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
486# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
487# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
488# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
489# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
490# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
491# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
492# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
493# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
494
495#endif /* VBOX_WITH_NESTED_HWVIRT */
496
497
498/*********************************************************************************************************************************
499* Global Variables *
500*********************************************************************************************************************************/
501extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
502
503
504/** Function table for the ADD instruction. */
505IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
506{
507 iemAImpl_add_u8, iemAImpl_add_u8_locked,
508 iemAImpl_add_u16, iemAImpl_add_u16_locked,
509 iemAImpl_add_u32, iemAImpl_add_u32_locked,
510 iemAImpl_add_u64, iemAImpl_add_u64_locked
511};
512
513/** Function table for the ADC instruction. */
514IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
515{
516 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
517 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
518 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
519 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
520};
521
522/** Function table for the SUB instruction. */
523IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
524{
525 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
526 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
527 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
528 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
529};
530
531/** Function table for the SBB instruction. */
532IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
533{
534 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
535 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
536 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
537 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
538};
539
540/** Function table for the OR instruction. */
541IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
542{
543 iemAImpl_or_u8, iemAImpl_or_u8_locked,
544 iemAImpl_or_u16, iemAImpl_or_u16_locked,
545 iemAImpl_or_u32, iemAImpl_or_u32_locked,
546 iemAImpl_or_u64, iemAImpl_or_u64_locked
547};
548
549/** Function table for the XOR instruction. */
550IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
551{
552 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
553 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
554 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
555 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
556};
557
558/** Function table for the AND instruction. */
559IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
560{
561 iemAImpl_and_u8, iemAImpl_and_u8_locked,
562 iemAImpl_and_u16, iemAImpl_and_u16_locked,
563 iemAImpl_and_u32, iemAImpl_and_u32_locked,
564 iemAImpl_and_u64, iemAImpl_and_u64_locked
565};
566
567/** Function table for the CMP instruction.
568 * @remarks Making operand order ASSUMPTIONS.
569 */
570IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
571{
572 iemAImpl_cmp_u8, NULL,
573 iemAImpl_cmp_u16, NULL,
574 iemAImpl_cmp_u32, NULL,
575 iemAImpl_cmp_u64, NULL
576};
577
578/** Function table for the TEST instruction.
579 * @remarks Making operand order ASSUMPTIONS.
580 */
581IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
582{
583 iemAImpl_test_u8, NULL,
584 iemAImpl_test_u16, NULL,
585 iemAImpl_test_u32, NULL,
586 iemAImpl_test_u64, NULL
587};
588
589/** Function table for the BT instruction. */
590IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
591{
592 NULL, NULL,
593 iemAImpl_bt_u16, NULL,
594 iemAImpl_bt_u32, NULL,
595 iemAImpl_bt_u64, NULL
596};
597
598/** Function table for the BTC instruction. */
599IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
600{
601 NULL, NULL,
602 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
603 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
604 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
605};
606
607/** Function table for the BTR instruction. */
608IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
609{
610 NULL, NULL,
611 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
612 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
613 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
614};
615
616/** Function table for the BTS instruction. */
617IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
618{
619 NULL, NULL,
620 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
621 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
622 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
623};
624
625/** Function table for the BSF instruction. */
626IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
627{
628 NULL, NULL,
629 iemAImpl_bsf_u16, NULL,
630 iemAImpl_bsf_u32, NULL,
631 iemAImpl_bsf_u64, NULL
632};
633
634/** Function table for the BSR instruction. */
635IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
636{
637 NULL, NULL,
638 iemAImpl_bsr_u16, NULL,
639 iemAImpl_bsr_u32, NULL,
640 iemAImpl_bsr_u64, NULL
641};
642
643/** Function table for the IMUL instruction. */
644IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
645{
646 NULL, NULL,
647 iemAImpl_imul_two_u16, NULL,
648 iemAImpl_imul_two_u32, NULL,
649 iemAImpl_imul_two_u64, NULL
650};
651
652/** Group 1 /r lookup table. */
653IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
654{
655 &g_iemAImpl_add,
656 &g_iemAImpl_or,
657 &g_iemAImpl_adc,
658 &g_iemAImpl_sbb,
659 &g_iemAImpl_and,
660 &g_iemAImpl_sub,
661 &g_iemAImpl_xor,
662 &g_iemAImpl_cmp
663};
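/*
 * Illustrative note, not part of the original sources: for the 0x80..0x83
 * opcode group the ModR/M reg field selects the entry above (/0=ADD ... /7=CMP),
 * along the lines of (bRm being the hypothetical ModR/M byte):
 *
 *     PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 */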
664
665/** Function table for the INC instruction. */
666IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
667{
668 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
669 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
670 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
671 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
672};
673
674/** Function table for the DEC instruction. */
675IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
676{
677 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
678 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
679 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
680 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
681};
682
683/** Function table for the NEG instruction. */
684IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
685{
686 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
687 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
688 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
689 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
690};
691
692/** Function table for the NOT instruction. */
693IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
694{
695 iemAImpl_not_u8, iemAImpl_not_u8_locked,
696 iemAImpl_not_u16, iemAImpl_not_u16_locked,
697 iemAImpl_not_u32, iemAImpl_not_u32_locked,
698 iemAImpl_not_u64, iemAImpl_not_u64_locked
699};
700
701
702/** Function table for the ROL instruction. */
703IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
704{
705 iemAImpl_rol_u8,
706 iemAImpl_rol_u16,
707 iemAImpl_rol_u32,
708 iemAImpl_rol_u64
709};
710
711/** Function table for the ROR instruction. */
712IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
713{
714 iemAImpl_ror_u8,
715 iemAImpl_ror_u16,
716 iemAImpl_ror_u32,
717 iemAImpl_ror_u64
718};
719
720/** Function table for the RCL instruction. */
721IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
722{
723 iemAImpl_rcl_u8,
724 iemAImpl_rcl_u16,
725 iemAImpl_rcl_u32,
726 iemAImpl_rcl_u64
727};
728
729/** Function table for the RCR instruction. */
730IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
731{
732 iemAImpl_rcr_u8,
733 iemAImpl_rcr_u16,
734 iemAImpl_rcr_u32,
735 iemAImpl_rcr_u64
736};
737
738/** Function table for the SHL instruction. */
739IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
740{
741 iemAImpl_shl_u8,
742 iemAImpl_shl_u16,
743 iemAImpl_shl_u32,
744 iemAImpl_shl_u64
745};
746
747/** Function table for the SHR instruction. */
748IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
749{
750 iemAImpl_shr_u8,
751 iemAImpl_shr_u16,
752 iemAImpl_shr_u32,
753 iemAImpl_shr_u64
754};
755
756/** Function table for the SAR instruction. */
757IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
758{
759 iemAImpl_sar_u8,
760 iemAImpl_sar_u16,
761 iemAImpl_sar_u32,
762 iemAImpl_sar_u64
763};
764
765
766/** Function table for the MUL instruction. */
767IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
768{
769 iemAImpl_mul_u8,
770 iemAImpl_mul_u16,
771 iemAImpl_mul_u32,
772 iemAImpl_mul_u64
773};
774
775/** Function table for the IMUL instruction working implicitly on rAX. */
776IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
777{
778 iemAImpl_imul_u8,
779 iemAImpl_imul_u16,
780 iemAImpl_imul_u32,
781 iemAImpl_imul_u64
782};
783
784/** Function table for the DIV instruction. */
785IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
786{
787 iemAImpl_div_u8,
788 iemAImpl_div_u16,
789 iemAImpl_div_u32,
790 iemAImpl_div_u64
791};
792
793/** Function table for the IDIV instruction. */
794IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
795{
796 iemAImpl_idiv_u8,
797 iemAImpl_idiv_u16,
798 iemAImpl_idiv_u32,
799 iemAImpl_idiv_u64
800};
801
802/** Function table for the SHLD instruction */
803IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
804{
805 iemAImpl_shld_u16,
806 iemAImpl_shld_u32,
807 iemAImpl_shld_u64,
808};
809
810/** Function table for the SHRD instruction */
811IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
812{
813 iemAImpl_shrd_u16,
814 iemAImpl_shrd_u32,
815 iemAImpl_shrd_u64,
816};
817
818
819/** Function table for the PUNPCKLBW instruction */
820IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
821/** Function table for the PUNPCKLWD instruction */
822IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
823/** Function table for the PUNPCKLDQ instruction */
824IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
825/** Function table for the PUNPCKLQDQ instruction */
826IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
827
828/** Function table for the PUNPCKHBW instruction */
829IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
830/** Function table for the PUNPCKHWD instruction */
831IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
832/** Function table for the PUNPCKHDQ instruction */
833IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
834/** Function table for the PUNPCKHQDQ instruction */
835IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
836
837/** Function table for the PXOR instruction */
838IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
839/** Function table for the PCMPEQB instruction */
840IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
841/** Function table for the PCMPEQW instruction */
842IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
843/** Function table for the PCMPEQD instruction */
844IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
845
846
847#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
848/** What IEM just wrote. */
849uint8_t g_abIemWrote[256];
850/** How much IEM just wrote. */
851size_t g_cbIemWrote;
852#endif
853
854
855/*********************************************************************************************************************************
856* Internal Functions *
857*********************************************************************************************************************************/
858IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
859IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
860IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
861IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
862/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
863IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
864IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
865IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
866IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
867IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
868IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
869IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
870IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
871IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
872IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
873IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
874IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
875#ifdef IEM_WITH_SETJMP
876DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
877DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
878DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
879DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
880DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
881#endif
882
883IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
884IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
885IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
886IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
887IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
888IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
889IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
890IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
891IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
892IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
893IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
894IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
895IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
896IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
897IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
898IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
899IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
900
901#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
902IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
903#endif
904IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
905IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
906
907#ifdef VBOX_WITH_NESTED_HWVIRT
908IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
909 uint64_t uExitInfo2);
910IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
911 uint32_t uErr, uint64_t uCr2);
912#endif
913
914/**
915 * Sets the pass up status.
916 *
917 * @returns VINF_SUCCESS.
918 * @param pVCpu The cross context virtual CPU structure of the
919 * calling thread.
920 * @param rcPassUp The pass up status. Must be informational.
921 * VINF_SUCCESS is not allowed.
922 */
923IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
924{
925 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
926
927 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
928 if (rcOldPassUp == VINF_SUCCESS)
929 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
930 /* If both are EM scheduling codes, use EM priority rules. */
931 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
932 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
933 {
934 if (rcPassUp < rcOldPassUp)
935 {
936 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
937 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
938 }
939 else
940 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
941 }
942 /* Override EM scheduling with specific status code. */
943 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
944 {
945 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
946 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
947 }
948 /* Don't override specific status code, first come first served. */
949 else
950 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
951 return VINF_SUCCESS;
952}
953
954
955/**
956 * Calculates the CPU mode.
957 *
958 * This is mainly for updating IEMCPU::enmCpuMode.
959 *
960 * @returns CPU mode.
961 * @param pCtx The register context for the CPU.
962 */
963DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
964{
965 if (CPUMIsGuestIn64BitCodeEx(pCtx))
966 return IEMMODE_64BIT;
967 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
968 return IEMMODE_32BIT;
969 return IEMMODE_16BIT;
970}
971
972
973/**
974 * Initializes the execution state.
975 *
976 * @param pVCpu The cross context virtual CPU structure of the
977 * calling thread.
978 * @param fBypassHandlers Whether to bypass access handlers.
979 *
980 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
981 * side-effects in strict builds.
982 */
983DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
984{
985 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
986
987 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
988
989#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
990 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
991 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
992 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
993 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
994 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
995 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
996 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
997 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
998#endif
999
1000#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1001 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1002#endif
1003 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1004 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1005#ifdef VBOX_STRICT
1006 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1007 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1008 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1009 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1010 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1011 pVCpu->iem.s.uRexReg = 127;
1012 pVCpu->iem.s.uRexB = 127;
1013 pVCpu->iem.s.uRexIndex = 127;
1014 pVCpu->iem.s.iEffSeg = 127;
1015 pVCpu->iem.s.idxPrefix = 127;
1016 pVCpu->iem.s.uVex3rdReg = 127;
1017 pVCpu->iem.s.uVexLength = 127;
1018 pVCpu->iem.s.fEvexStuff = 127;
1019 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1020# ifdef IEM_WITH_CODE_TLB
1021 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1022 pVCpu->iem.s.pbInstrBuf = NULL;
1023 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1024 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1025 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1026 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1027# else
1028 pVCpu->iem.s.offOpcode = 127;
1029 pVCpu->iem.s.cbOpcode = 127;
1030# endif
1031#endif
1032
1033 pVCpu->iem.s.cActiveMappings = 0;
1034 pVCpu->iem.s.iNextMapping = 0;
1035 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1036 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1037#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1038 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1039 && pCtx->cs.u64Base == 0
1040 && pCtx->cs.u32Limit == UINT32_MAX
1041 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1042 if (!pVCpu->iem.s.fInPatchCode)
1043 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1044#endif
1045
1046#ifdef IEM_VERIFICATION_MODE_FULL
1047 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1048 pVCpu->iem.s.fNoRem = true;
1049#endif
1050}
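/*
 * Illustrative sketch, not part of the original sources: the pairing required
 * by the remark above, with the middle step elided:
 *
 *     iemInitExec(pVCpu, false);   // fBypassHandlers=false
 *     ... decode and execute ...
 *     iemUninitExec(pVCpu);        // undoes the strict-build poisoning
 */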
1051
1052#ifdef VBOX_WITH_NESTED_HWVIRT
1053/**
1054 * Performs a minimal reinitialization of the execution state.
1055 *
1056 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1057 * 'world-switch' type operations on the CPU. Currently, only nested
1058 * hardware-virtualization uses it.
1059 *
1060 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1061 */
1062IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1063{
1064 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1065 IEMMODE const enmMode = iemCalcCpuMode(pCtx);
1066 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1067
1068 pVCpu->iem.s.uCpl = uCpl;
1069 pVCpu->iem.s.enmCpuMode = enmMode;
1070 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1071 pVCpu->iem.s.enmEffAddrMode = enmMode;
1072 if (enmMode != IEMMODE_64BIT)
1073 {
1074 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1075 pVCpu->iem.s.enmEffOpSize = enmMode;
1076 }
1077 else
1078 {
1079 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1080 pVCpu->iem.s.enmEffOpSize = enmMode;
1081 }
1082 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1083#ifndef IEM_WITH_CODE_TLB
1084 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1085 pVCpu->iem.s.offOpcode = 0;
1086 pVCpu->iem.s.cbOpcode = 0;
1087#endif
1088}
1089#endif
1090
1091/**
1092 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1093 *
1094 * @param pVCpu The cross context virtual CPU structure of the
1095 * calling thread.
1096 */
1097DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1098{
1099 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1100#ifdef IEM_VERIFICATION_MODE_FULL
1101 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1102#endif
1103#ifdef VBOX_STRICT
1104# ifdef IEM_WITH_CODE_TLB
1105 NOREF(pVCpu);
1106# else
1107 pVCpu->iem.s.cbOpcode = 0;
1108# endif
1109#else
1110 NOREF(pVCpu);
1111#endif
1112}
1113
1114
1115/**
1116 * Initializes the decoder state.
1117 *
1118 * iemReInitDecoder is mostly a copy of this function.
1119 *
1120 * @param pVCpu The cross context virtual CPU structure of the
1121 * calling thread.
1122 * @param fBypassHandlers Whether to bypass access handlers.
1123 */
1124DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1125{
1126 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1127
1128 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1129
1130#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1131 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1132 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1133 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1134 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1135 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1136 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1137 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1138 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1139#endif
1140
1141#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1142 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1143#endif
1144 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1145#ifdef IEM_VERIFICATION_MODE_FULL
1146 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1147 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1148#endif
1149 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1150 pVCpu->iem.s.enmCpuMode = enmMode;
1151 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1152 pVCpu->iem.s.enmEffAddrMode = enmMode;
1153 if (enmMode != IEMMODE_64BIT)
1154 {
1155 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1156 pVCpu->iem.s.enmEffOpSize = enmMode;
1157 }
1158 else
1159 {
1160 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1161 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1162 }
1163 pVCpu->iem.s.fPrefixes = 0;
1164 pVCpu->iem.s.uRexReg = 0;
1165 pVCpu->iem.s.uRexB = 0;
1166 pVCpu->iem.s.uRexIndex = 0;
1167 pVCpu->iem.s.idxPrefix = 0;
1168 pVCpu->iem.s.uVex3rdReg = 0;
1169 pVCpu->iem.s.uVexLength = 0;
1170 pVCpu->iem.s.fEvexStuff = 0;
1171 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1172#ifdef IEM_WITH_CODE_TLB
1173 pVCpu->iem.s.pbInstrBuf = NULL;
1174 pVCpu->iem.s.offInstrNextByte = 0;
1175 pVCpu->iem.s.offCurInstrStart = 0;
1176# ifdef VBOX_STRICT
1177 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1178 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1179 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1180# endif
1181#else
1182 pVCpu->iem.s.offOpcode = 0;
1183 pVCpu->iem.s.cbOpcode = 0;
1184#endif
1185 pVCpu->iem.s.cActiveMappings = 0;
1186 pVCpu->iem.s.iNextMapping = 0;
1187 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1188 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1189#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1190 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1191 && pCtx->cs.u64Base == 0
1192 && pCtx->cs.u32Limit == UINT32_MAX
1193 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1194 if (!pVCpu->iem.s.fInPatchCode)
1195 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1196#endif
1197
1198#ifdef DBGFTRACE_ENABLED
1199 switch (enmMode)
1200 {
1201 case IEMMODE_64BIT:
1202 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1203 break;
1204 case IEMMODE_32BIT:
1205 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1206 break;
1207 case IEMMODE_16BIT:
1208 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1209 break;
1210 }
1211#endif
1212}
1213
1214
1215/**
1216 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1217 *
1218 * This is mostly a copy of iemInitDecoder.
1219 *
1220 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1221 */
1222DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1223{
1224 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1225
1226 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1227
1228#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1229 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1230 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1231 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1232 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1233 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1234 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1235 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1236 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1237#endif
1238
1239 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1240#ifdef IEM_VERIFICATION_MODE_FULL
1241 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1242 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1243#endif
1244 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1245 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1246 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1247 pVCpu->iem.s.enmEffAddrMode = enmMode;
1248 if (enmMode != IEMMODE_64BIT)
1249 {
1250 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1251 pVCpu->iem.s.enmEffOpSize = enmMode;
1252 }
1253 else
1254 {
1255 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1256 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1257 }
1258 pVCpu->iem.s.fPrefixes = 0;
1259 pVCpu->iem.s.uRexReg = 0;
1260 pVCpu->iem.s.uRexB = 0;
1261 pVCpu->iem.s.uRexIndex = 0;
1262 pVCpu->iem.s.idxPrefix = 0;
1263 pVCpu->iem.s.uVex3rdReg = 0;
1264 pVCpu->iem.s.uVexLength = 0;
1265 pVCpu->iem.s.fEvexStuff = 0;
1266 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1267#ifdef IEM_WITH_CODE_TLB
1268 if (pVCpu->iem.s.pbInstrBuf)
1269 {
1270 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1271 - pVCpu->iem.s.uInstrBufPc;
1272 if (off < pVCpu->iem.s.cbInstrBufTotal)
1273 {
1274 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1275 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1276 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1277 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1278 else
1279 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1280 }
1281 else
1282 {
1283 pVCpu->iem.s.pbInstrBuf = NULL;
1284 pVCpu->iem.s.offInstrNextByte = 0;
1285 pVCpu->iem.s.offCurInstrStart = 0;
1286 pVCpu->iem.s.cbInstrBuf = 0;
1287 pVCpu->iem.s.cbInstrBufTotal = 0;
1288 }
1289 }
1290 else
1291 {
1292 pVCpu->iem.s.offInstrNextByte = 0;
1293 pVCpu->iem.s.offCurInstrStart = 0;
1294 pVCpu->iem.s.cbInstrBuf = 0;
1295 pVCpu->iem.s.cbInstrBufTotal = 0;
1296 }
1297#else
1298 pVCpu->iem.s.cbOpcode = 0;
1299 pVCpu->iem.s.offOpcode = 0;
1300#endif
1301 Assert(pVCpu->iem.s.cActiveMappings == 0);
1302 pVCpu->iem.s.iNextMapping = 0;
1303 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1304 Assert(pVCpu->iem.s.fBypassHandlers == false);
1305#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1306 if (!pVCpu->iem.s.fInPatchCode)
1307 { /* likely */ }
1308 else
1309 {
1310 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1311 && pCtx->cs.u64Base == 0
1312 && pCtx->cs.u32Limit == UINT32_MAX
1313 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1314 if (!pVCpu->iem.s.fInPatchCode)
1315 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1316 }
1317#endif
1318
1319#ifdef DBGFTRACE_ENABLED
1320 switch (enmMode)
1321 {
1322 case IEMMODE_64BIT:
1323 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1324 break;
1325 case IEMMODE_32BIT:
1326 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1327 break;
1328 case IEMMODE_16BIT:
1329 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1330 break;
1331 }
1332#endif
1333}
1334
1335
1336
1337/**
1338 * Prefetch opcodes the first time execution is started.
1339 *
1340 * @returns Strict VBox status code.
1341 * @param pVCpu The cross context virtual CPU structure of the
1342 * calling thread.
1343 * @param fBypassHandlers Whether to bypass access handlers.
1344 */
1345IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1346{
1347#ifdef IEM_VERIFICATION_MODE_FULL
1348 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1349#endif
1350 iemInitDecoder(pVCpu, fBypassHandlers);
1351
1352#ifdef IEM_WITH_CODE_TLB
1353 /** @todo Do ITLB lookup here. */
1354
1355#else /* !IEM_WITH_CODE_TLB */
1356
1357 /*
1358 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1359 *
1360 * First translate CS:rIP to a physical address.
1361 */
1362 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1363 uint32_t cbToTryRead;
1364 RTGCPTR GCPtrPC;
1365 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1366 {
1367 cbToTryRead = PAGE_SIZE;
1368 GCPtrPC = pCtx->rip;
1369 if (IEM_IS_CANONICAL(GCPtrPC))
1370 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1371 else
1372 return iemRaiseGeneralProtectionFault0(pVCpu);
1373 }
1374 else
1375 {
1376 uint32_t GCPtrPC32 = pCtx->eip;
1377 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1378 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1379 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1380 else
1381 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1382 if (cbToTryRead) { /* likely */ }
1383 else /* overflowed */
1384 {
1385 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1386 cbToTryRead = UINT32_MAX;
1387 }
1388 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1389 Assert(GCPtrPC <= UINT32_MAX);
1390 }
1391
1392# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1393 /* Allow interpretation of patch manager code blocks since they can for
1394 instance throw #PFs for perfectly good reasons. */
1395 if (pVCpu->iem.s.fInPatchCode)
1396 {
1397 size_t cbRead = 0;
1398 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1399 AssertRCReturn(rc, rc);
1400 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1401 return VINF_SUCCESS;
1402 }
1403# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1404
1405 RTGCPHYS GCPhys;
1406 uint64_t fFlags;
1407 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1408 if (RT_SUCCESS(rc)) { /* probable */ }
1409 else
1410 {
1411 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1412 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1413 }
1414 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1415 else
1416 {
1417 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1418 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1419 }
1420 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1421 else
1422 {
1423 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1424 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1425 }
1426 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1427 /** @todo Check reserved bits and such stuff. PGM is better at doing
1428 * that, so do it when implementing the guest virtual address
1429 * TLB... */
1430
1431# ifdef IEM_VERIFICATION_MODE_FULL
1432 /*
1433 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1434 * instruction.
1435 */
1436 /** @todo optimize this differently by not using PGMPhysRead. */
1437 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1438 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1439 if ( offPrevOpcodes < cbOldOpcodes
1440 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1441 {
1442 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1443 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1444 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1445 pVCpu->iem.s.cbOpcode = cbNew;
1446 return VINF_SUCCESS;
1447 }
1448# endif
1449
1450 /*
1451 * Read the bytes at this address.
1452 */
1453 PVM pVM = pVCpu->CTX_SUFF(pVM);
1454# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1455 size_t cbActual;
1456 if ( PATMIsEnabled(pVM)
1457 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1458 {
1459 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1460 Assert(cbActual > 0);
1461 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1462 }
1463 else
1464# endif
1465 {
1466 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1467 if (cbToTryRead > cbLeftOnPage)
1468 cbToTryRead = cbLeftOnPage;
1469 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1470 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1471
1472 if (!pVCpu->iem.s.fBypassHandlers)
1473 {
1474 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1475 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1476 { /* likely */ }
1477 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1478 {
1479 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1480 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1481 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1482 }
1483 else
1484 {
1485 Log((RT_SUCCESS(rcStrict)
1486 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1487 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1488 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1489 return rcStrict;
1490 }
1491 }
1492 else
1493 {
1494 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1495 if (RT_SUCCESS(rc))
1496 { /* likely */ }
1497 else
1498 {
1499 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1500 GCPtrPC, GCPhys, cbToTryRead, rc));
1501 return rc;
1502 }
1503 }
1504 pVCpu->iem.s.cbOpcode = cbToTryRead;
1505 }
1506#endif /* !IEM_WITH_CODE_TLB */
1507 return VINF_SUCCESS;
1508}
1509
1510
1511/**
1512 * Invalidates the IEM TLBs.
1513 *
1514 * This is called internally as well as by PGM when moving GC mappings.
1515 *
1516 *
1517 * @param pVCpu The cross context virtual CPU structure of the calling
1518 * thread.
1519 * @param fVmm Set when PGM calls us with a remapping.
1520 */
1521VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1522{
1523#ifdef IEM_WITH_CODE_TLB
1524 pVCpu->iem.s.cbInstrBufTotal = 0;
1525 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1526 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1527 { /* very likely */ }
1528 else
1529 {
1530 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1531 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1532 while (i-- > 0)
1533 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1534 }
1535#endif
1536
1537#ifdef IEM_WITH_DATA_TLB
1538 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1539 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1540 { /* very likely */ }
1541 else
1542 {
1543 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1544 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1545 while (i-- > 0)
1546 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1547 }
1548#endif
1549 NOREF(pVCpu); NOREF(fVmm);
1550}
1551
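/*
 * Illustrative sketch (not part of the build): the revision trick above flushes
 * a whole TLB in O(1).  Entry tags are stored as (page number | revision), so
 * bumping the revision makes every stored tag miss; entries are only scrubbed
 * when the revision counter wraps back to zero.  A minimal self-contained model
 * of the idea, using hypothetical names (TINYTLB, tinyTlbIsHit, tinyTlbFlush),
 * assuming page numbers stay below bit 52 and that uRev starts out as
 * TINYTLB_REV_INCR:
 *
 * @code
 *      typedef struct TINYTLB { uint64_t uRev; uint64_t auTag[256]; } TINYTLB;
 *      #define TINYTLB_REV_INCR    (UINT64_C(1) << 52)
 *
 *      static bool tinyTlbIsHit(TINYTLB const *pTlb, uint64_t uPageNo)
 *      {
 *          uint64_t const uTag = uPageNo | pTlb->uRev;
 *          return pTlb->auTag[(uint8_t)uPageNo] == uTag;   // stale revisions never match
 *      }
 *
 *      static void tinyTlbFlush(TINYTLB *pTlb)
 *      {
 *          pTlb->uRev += TINYTLB_REV_INCR;                 // invalidates all tags at once
 *          if (!pTlb->uRev)                                // wrapped: scrub and restart
 *          {
 *              pTlb->uRev = TINYTLB_REV_INCR;
 *              for (unsigned i = 0; i < 256; i++)
 *                  pTlb->auTag[i] = 0;
 *          }
 *      }
 * @endcode
 */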
1552
1553/**
1554 * Invalidates a page in the TLBs.
1555 *
1556 * @param pVCpu The cross context virtual CPU structure of the calling
1557 * thread.
1558 * @param GCPtr The address of the page to invalidate.
1559 */
1560VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1561{
1562#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1563 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1564 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1565 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1566 uintptr_t idx = (uint8_t)GCPtr;
1567
1568# ifdef IEM_WITH_CODE_TLB
1569 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1570 {
1571 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1572 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1573 pVCpu->iem.s.cbInstrBufTotal = 0;
1574 }
1575# endif
1576
1577# ifdef IEM_WITH_DATA_TLB
1578 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1579 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1580# endif
1581#else
1582 NOREF(pVCpu); NOREF(GCPtr);
1583#endif
1584}
1585
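/*
 * Illustrative note: both TLBs are direct-mapped with 256 entries, so a given
 * page can live in exactly one slot, indexed by the low 8 bits of its page
 * number; invalidating a single page is therefore one compare-and-clear as
 * done above.  Worked example with a hypothetical helper name (tlbSlotForPage):
 *
 * @code
 *      // GCPtr = 0x00007fff12345678, 4 KiB pages:
 *      //   page number = GCPtr >> 12      = 0x7fff12345
 *      //   slot index  = (uint8_t)pageNo  = 0x45
 *      static unsigned tlbSlotForPage(uint64_t GCPtr)
 *      {
 *          return (uint8_t)(GCPtr >> 12);
 *      }
 * @endcode
 */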
1586
1587/**
1588 * Invalidates the host physical aspects of the IEM TLBs.
1589 *
1590 * This is called internally as well as by PGM when moving GC mappings.
1591 *
1592 * @param pVCpu The cross context virtual CPU structure of the calling
1593 * thread.
1594 */
1595VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1596{
1597#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1598 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1599
1600# ifdef IEM_WITH_CODE_TLB
1601 pVCpu->iem.s.cbInstrBufTotal = 0;
1602# endif
1603 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1604 if (uTlbPhysRev != 0)
1605 {
1606 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1607 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1608 }
1609 else
1610 {
1611 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1612 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1613
1614 unsigned i;
1615# ifdef IEM_WITH_CODE_TLB
1616 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1617 while (i-- > 0)
1618 {
1619 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1620 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1621 }
1622# endif
1623# ifdef IEM_WITH_DATA_TLB
1624 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1625 while (i-- > 0)
1626 {
1627 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1628 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1629 }
1630# endif
1631 }
1632#else
1633 NOREF(pVCpu);
1634#endif
1635}
1636
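/*
 * Illustrative note: only the "physical" half of each TLB entry (the ring-3
 * mapping pointer and the PG_* access bits) is invalidated here.  Those bits
 * share fFlagsAndPhysRev with a physical revision value, so consumers
 * re-validate them lazily with a single compare, roughly like this sketch
 * (mirroring the check done in iemOpcodeFetchBytesJmp below):
 *
 * @code
 *      if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
 *      {
 *          // physical info still valid, pbMappingR3 and the PG_* bits can be used
 *      }
 *      else
 *      {
 *          // stale: re-query PGM and refresh the mapping pointer and PG_* bits
 *      }
 * @endcode
 */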
1637
1638/**
1639 * Invalidates the host physical aspects of the IEM TLBs.
1640 *
1641 * This is called internally as well as by PGM when moving GC mappings.
1642 *
1643 * @param pVM The cross context VM structure.
1644 *
1645 * @remarks Caller holds the PGM lock.
1646 */
1647VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1648{
1649 RT_NOREF_PV(pVM);
1650}
1651
1652#ifdef IEM_WITH_CODE_TLB
1653
1654/**
1655 * Tries to fetch @a cbDst opcode bytes; on failure it raises the appropriate
1656 * exception and longjmps.
1657 *
1658 * We end up here for a number of reasons:
1659 * - pbInstrBuf isn't yet initialized.
1660 * - Advancing beyond the buffer boundary (e.g. cross page).
1661 * - Advancing beyond the CS segment limit.
1662 * - Fetching from a non-mappable page (e.g. MMIO).
1663 *
1664 * @param pVCpu The cross context virtual CPU structure of the
1665 * calling thread.
1666 * @param cbDst Number of bytes to read.
1667 * @param pvDst Where to return the bytes.
1668 *
1669 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1670 */
1671IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1672{
1673#ifdef IN_RING3
1674//__debugbreak();
1675 for (;;)
1676 {
1677 Assert(cbDst <= 8);
1678 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1679
1680 /*
1681 * We might have a partial buffer match, deal with that first to make the
1682 * rest simpler. This is the first part of the cross page/buffer case.
1683 */
1684 if (pVCpu->iem.s.pbInstrBuf != NULL)
1685 {
1686 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1687 {
1688 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1689 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1690 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1691
1692 cbDst -= cbCopy;
1693 pvDst = (uint8_t *)pvDst + cbCopy;
1694 offBuf += cbCopy;
1695 pVCpu->iem.s.offInstrNextByte += cbCopy;
1696 }
1697 }
1698
1699 /*
1700 * Check segment limit, figuring how much we're allowed to access at this point.
1701 *
1702 * We will fault immediately if RIP is past the segment limit / in non-canonical
1703 * territory. If we do continue, there are one or more bytes to read before we
1704 * end up in trouble and we need to do that first before faulting.
1705 */
1706 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1707 RTGCPTR GCPtrFirst;
1708 uint32_t cbMaxRead;
1709 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1710 {
1711 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1712 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1713 { /* likely */ }
1714 else
1715 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1716 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1717 }
1718 else
1719 {
1720 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1721 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1722 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1723 { /* likely */ }
1724 else
1725 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1726 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1727 if (cbMaxRead != 0)
1728 { /* likely */ }
1729 else
1730 {
1731 /* Overflowed because address is 0 and limit is max. */
1732 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1733 cbMaxRead = X86_PAGE_SIZE;
1734 }
1735 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1736 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1737 if (cbMaxRead2 < cbMaxRead)
1738 cbMaxRead = cbMaxRead2;
1739 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1740 }
1741
1742 /*
1743 * Get the TLB entry for this piece of code.
1744 */
1745 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1746 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1747 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1748 if (pTlbe->uTag == uTag)
1749 {
1750 /* likely when executing lots of code, otherwise unlikely */
1751# ifdef VBOX_WITH_STATISTICS
1752 pVCpu->iem.s.CodeTlb.cTlbHits++;
1753# endif
1754 }
1755 else
1756 {
1757 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1758# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1759 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1760 {
1761 pTlbe->uTag = uTag;
1762 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1763 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1764 pTlbe->GCPhys = NIL_RTGCPHYS;
1765 pTlbe->pbMappingR3 = NULL;
1766 }
1767 else
1768# endif
1769 {
1770 RTGCPHYS GCPhys;
1771 uint64_t fFlags;
1772 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1773 if (RT_FAILURE(rc))
1774 {
1775 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1776 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1777 }
1778
1779 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1780 pTlbe->uTag = uTag;
1781 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1782 pTlbe->GCPhys = GCPhys;
1783 pTlbe->pbMappingR3 = NULL;
1784 }
1785 }
1786
1787 /*
1788 * Check TLB page table level access flags.
1789 */
1790 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1791 {
1792 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1793 {
1794 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1795 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1796 }
1797 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1798 {
1799 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1800 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1801 }
1802 }
1803
1804# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1805 /*
1806 * Allow interpretation of patch manager code blocks since they can for
1807 * instance throw #PFs for perfectly good reasons.
1808 */
1809 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1810 { /* likely */ }
1811 else
1812 {
1813 /** @todo Could optimize this a little in ring-3 if we liked. */
1814 size_t cbRead = 0;
1815 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1816 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1817 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1818 return;
1819 }
1820# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1821
1822 /*
1823 * Look up the physical page info if necessary.
1824 */
1825 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1826 { /* not necessary */ }
1827 else
1828 {
1829 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1830 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1831 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1832 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1833 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1834 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1835 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1836 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1837 }
1838
1839# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1840 /*
1841 * Try to do a direct read using the pbMappingR3 pointer.
1842 */
1843 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1844 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1845 {
1846 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1847 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1848 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1849 {
1850 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1851 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1852 }
1853 else
1854 {
1855 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1856 Assert(cbInstr < cbMaxRead);
1857 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1858 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1859 }
1860 if (cbDst <= cbMaxRead)
1861 {
1862 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1863 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1864 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1865 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1866 return;
1867 }
1868 pVCpu->iem.s.pbInstrBuf = NULL;
1869
1870 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1871 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1872 }
1873 else
1874# endif
1875#if 0
1876 /*
1877 * If there is no special read handling, we can read a bit more and
1878 * put it in the prefetch buffer.
1879 */
1880 if ( cbDst < cbMaxRead
1881 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1882 {
1883 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1884 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1885 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1886 { /* likely */ }
1887 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1888 {
1889 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1890 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1891 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1892 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1893 }
1894 else
1895 {
1896 Log((RT_SUCCESS(rcStrict)
1897 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1898 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1899 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1900 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1901 }
1902 }
1903 /*
1904 * Special read handling, so only read exactly what's needed.
1905 * This is a highly unlikely scenario.
1906 */
1907 else
1908#endif
1909 {
1910 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1911 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1912 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1913 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1914 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1915 { /* likely */ }
1916 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1917 {
1918 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1919 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1920 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1921 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1922 }
1923 else
1924 {
1925 Log((RT_SUCCESS(rcStrict)
1926 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1927 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1928 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1929 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1930 }
1931 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1932 if (cbToRead == cbDst)
1933 return;
1934 }
1935
1936 /*
1937 * More to read, loop.
1938 */
1939 cbDst -= cbMaxRead;
1940 pvDst = (uint8_t *)pvDst + cbMaxRead;
1941 }
1942#else
1943 RT_NOREF(pvDst, cbDst);
1944 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1945#endif
1946}
1947
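/*
 * Worked example (illustrative): an instruction straddling a page boundary with
 * only two bytes left in pbInstrBuf and a four byte fetch requested.  The first
 * iteration of the loop above copies the two buffered bytes, the TLB/PGM path
 * maps or reads the next page, and the loop tail advances pvDst/cbDst before
 * the second iteration copies the remaining two bytes.  A self-contained model
 * of that chunked copy, with hypothetical names and no fault handling:
 *
 * @code
 *      static void demoFetchAcrossChunks(const uint8_t *pbSrc, size_t cbChunk,
 *                                        size_t offStart, uint8_t *pbDst, size_t cbDst)
 *      {
 *          size_t off = offStart;
 *          while (cbDst > 0)
 *          {
 *              size_t cbThis = cbChunk - (off % cbChunk);  // bytes left in the current "page"
 *              if (cbThis > cbDst)
 *                  cbThis = cbDst;
 *              memcpy(pbDst, &pbSrc[off], cbThis);
 *              off   += cbThis;
 *              pbDst += cbThis;
 *              cbDst -= cbThis;
 *          }
 *      }
 * @endcode
 */
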
1948#else
1949
1950/**
1951 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1952 * exception if it fails.
1953 *
1954 * @returns Strict VBox status code.
1955 * @param pVCpu The cross context virtual CPU structure of the
1956 * calling thread.
1957 * @param cbMin The minimum number of bytes relative to offOpcode
1958 * that must be read.
1959 */
1960IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1961{
1962 /*
1963 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1964 *
1965 * First translate CS:rIP to a physical address.
1966 */
1967 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1968 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1969 uint32_t cbToTryRead;
1970 RTGCPTR GCPtrNext;
1971 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1972 {
1973 cbToTryRead = PAGE_SIZE;
1974 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1975 if (!IEM_IS_CANONICAL(GCPtrNext))
1976 return iemRaiseGeneralProtectionFault0(pVCpu);
1977 }
1978 else
1979 {
1980 uint32_t GCPtrNext32 = pCtx->eip;
1981 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1982 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1983 if (GCPtrNext32 > pCtx->cs.u32Limit)
1984 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1985 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1986 if (!cbToTryRead) /* overflowed */
1987 {
1988 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1989 cbToTryRead = UINT32_MAX;
1990 /** @todo check out wrapping around the code segment. */
1991 }
1992 if (cbToTryRead < cbMin - cbLeft)
1993 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1994 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1995 }
1996
1997 /* Only read up to the end of the page, and make sure we don't read more
1998 than the opcode buffer can hold. */
1999 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2000 if (cbToTryRead > cbLeftOnPage)
2001 cbToTryRead = cbLeftOnPage;
2002 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2003 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2004/** @todo r=bird: Convert assertion into undefined opcode exception? */
2005 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2006
2007# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2008 /* Allow interpretation of patch manager code blocks since they can for
2009 instance throw #PFs for perfectly good reasons. */
2010 if (pVCpu->iem.s.fInPatchCode)
2011 {
2012 size_t cbRead = 0;
2013 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2014 AssertRCReturn(rc, rc);
2015 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2016 return VINF_SUCCESS;
2017 }
2018# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2019
2020 RTGCPHYS GCPhys;
2021 uint64_t fFlags;
2022 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2023 if (RT_FAILURE(rc))
2024 {
2025 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2026 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2027 }
2028 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2029 {
2030 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2031 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2032 }
2033 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2034 {
2035 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2036 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2037 }
2038 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2039 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2040 /** @todo Check reserved bits and such stuff. PGM is better at doing
2041 * that, so do it when implementing the guest virtual address
2042 * TLB... */
2043
2044 /*
2045 * Read the bytes at this address.
2046 *
2047 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2048 * and since PATM should only patch the start of an instruction there
2049 * should be no need to check again here.
2050 */
2051 if (!pVCpu->iem.s.fBypassHandlers)
2052 {
2053 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2054 cbToTryRead, PGMACCESSORIGIN_IEM);
2055 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2056 { /* likely */ }
2057 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2058 {
2059 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2060 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2061 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2062 }
2063 else
2064 {
2065 Log((RT_SUCCESS(rcStrict)
2066 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2067 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2068 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2069 return rcStrict;
2070 }
2071 }
2072 else
2073 {
2074 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2075 if (RT_SUCCESS(rc))
2076 { /* likely */ }
2077 else
2078 {
2079 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2080 return rc;
2081 }
2082 }
2083 pVCpu->iem.s.cbOpcode += cbToTryRead;
2084 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2085
2086 return VINF_SUCCESS;
2087}
2088
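/*
 * Illustrative note (non-TLB build): abOpcode caches the bytes at CS:RIP,
 * cbOpcode says how many of them are valid and offOpcode is the decoder's
 * read cursor.  The fast fetchers below consume straight from the cache and
 * only fall back to their *Slow variants, and thus to iemOpcodeFetchMoreBytes,
 * when the cursor would run past cbOpcode.  Minimal model with hypothetical
 * names:
 *
 * @code
 *      static int demoGetU8(uint8_t const *pabOpcode, uint8_t cbOpcode,
 *                           uint8_t *poffOpcode, uint8_t *pbValue)
 *      {
 *          uint8_t const off = *poffOpcode;
 *          if (off < cbOpcode)             // fast path: byte is already buffered
 *          {
 *              *poffOpcode = off + 1;
 *              *pbValue    = pabOpcode[off];
 *              return 0;                   // stands in for VINF_SUCCESS
 *          }
 *          return -1;                      // slow path: would fetch more bytes first
 *      }
 * @endcode
 */
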
2089#endif /* !IEM_WITH_CODE_TLB */
2090#ifndef IEM_WITH_SETJMP
2091
2092/**
2093 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2094 *
2095 * @returns Strict VBox status code.
2096 * @param pVCpu The cross context virtual CPU structure of the
2097 * calling thread.
2098 * @param pb Where to return the opcode byte.
2099 */
2100DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2101{
2102 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2103 if (rcStrict == VINF_SUCCESS)
2104 {
2105 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2106 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2107 pVCpu->iem.s.offOpcode = offOpcode + 1;
2108 }
2109 else
2110 *pb = 0;
2111 return rcStrict;
2112}
2113
2114
2115/**
2116 * Fetches the next opcode byte.
2117 *
2118 * @returns Strict VBox status code.
2119 * @param pVCpu The cross context virtual CPU structure of the
2120 * calling thread.
2121 * @param pu8 Where to return the opcode byte.
2122 */
2123DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2124{
2125 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2126 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2127 {
2128 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2129 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2130 return VINF_SUCCESS;
2131 }
2132 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2133}
2134
2135#else /* IEM_WITH_SETJMP */
2136
2137/**
2138 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2139 *
2140 * @returns The opcode byte.
2141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2142 */
2143DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2144{
2145# ifdef IEM_WITH_CODE_TLB
2146 uint8_t u8;
2147 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2148 return u8;
2149# else
2150 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2151 if (rcStrict == VINF_SUCCESS)
2152 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2153 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2154# endif
2155}
2156
2157
2158/**
2159 * Fetches the next opcode byte, longjmp on error.
2160 *
2161 * @returns The opcode byte.
2162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2163 */
2164DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2165{
2166# ifdef IEM_WITH_CODE_TLB
2167 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2168 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2169 if (RT_LIKELY( pbBuf != NULL
2170 && offBuf < pVCpu->iem.s.cbInstrBuf))
2171 {
2172 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2173 return pbBuf[offBuf];
2174 }
2175# else
2176 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2177 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2178 {
2179 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2180 return pVCpu->iem.s.abOpcode[offOpcode];
2181 }
2182# endif
2183 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2184}
2185
2186#endif /* IEM_WITH_SETJMP */
2187
2188/**
2189 * Fetches the next opcode byte, returns automatically on failure.
2190 *
2191 * @param a_pu8 Where to return the opcode byte.
2192 * @remark Implicitly references pVCpu.
2193 */
2194#ifndef IEM_WITH_SETJMP
2195# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2196 do \
2197 { \
2198 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2199 if (rcStrict2 == VINF_SUCCESS) \
2200 { /* likely */ } \
2201 else \
2202 return rcStrict2; \
2203 } while (0)
2204#else
2205# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2206#endif /* IEM_WITH_SETJMP */
2207
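/*
 * Usage sketch (hypothetical helper, not one of the real decoder functions):
 * in the status-code build the macro can make the enclosing function return,
 * so it may only be used in functions returning VBOXSTRICTRC; in the setjmp
 * build it expands to a plain assignment and errors unwind via longjmp.
 *
 * @code
 *      IEM_STATIC VBOXSTRICTRC iemOpDemoTwoByte(PVMCPU pVCpu)  // illustrative only
 *      {
 *          uint8_t bOp1, bOp2;
 *          IEM_OPCODE_GET_NEXT_U8(&bOp1);      // may 'return rcStrict2' right here
 *          IEM_OPCODE_GET_NEXT_U8(&bOp2);
 *          Log4(("demo: %#x %#x\n", bOp1, bOp2));
 *          return VINF_SUCCESS;
 *      }
 * @endcode
 */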
2208
2209#ifndef IEM_WITH_SETJMP
2210/**
2211 * Fetches the next signed byte from the opcode stream.
2212 *
2213 * @returns Strict VBox status code.
2214 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2215 * @param pi8 Where to return the signed byte.
2216 */
2217DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2218{
2219 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2220}
2221#endif /* !IEM_WITH_SETJMP */
2222
2223
2224/**
2225 * Fetches the next signed byte from the opcode stream, returning automatically
2226 * on failure.
2227 *
2228 * @param a_pi8 Where to return the signed byte.
2229 * @remark Implicitly references pVCpu.
2230 */
2231#ifndef IEM_WITH_SETJMP
2232# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2233 do \
2234 { \
2235 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2236 if (rcStrict2 != VINF_SUCCESS) \
2237 return rcStrict2; \
2238 } while (0)
2239#else /* IEM_WITH_SETJMP */
2240# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2241
2242#endif /* IEM_WITH_SETJMP */
2243
2244#ifndef IEM_WITH_SETJMP
2245
2246/**
2247 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2248 *
2249 * @returns Strict VBox status code.
2250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2251 * @param pu16 Where to return the opcode word.
2252 */
2253DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2254{
2255 uint8_t u8;
2256 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2257 if (rcStrict == VINF_SUCCESS)
2258 *pu16 = (int8_t)u8;
2259 return rcStrict;
2260}
2261
2262
2263/**
2264 * Fetches the next signed byte from the opcode stream, extending it to
2265 * unsigned 16-bit.
2266 *
2267 * @returns Strict VBox status code.
2268 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2269 * @param pu16 Where to return the unsigned word.
2270 */
2271DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2272{
2273 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2274 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2275 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2276
2277 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2278 pVCpu->iem.s.offOpcode = offOpcode + 1;
2279 return VINF_SUCCESS;
2280}
2281
2282#endif /* !IEM_WITH_SETJMP */
2283
2284/**
2285 * Fetches the next signed byte from the opcode stream, sign-extending it to
2286 * a word and returning automatically on failure.
2287 *
2288 * @param a_pu16 Where to return the word.
2289 * @remark Implicitly references pVCpu.
2290 */
2291#ifndef IEM_WITH_SETJMP
2292# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2293 do \
2294 { \
2295 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2296 if (rcStrict2 != VINF_SUCCESS) \
2297 return rcStrict2; \
2298 } while (0)
2299#else
2300# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2301#endif
2302
2303#ifndef IEM_WITH_SETJMP
2304
2305/**
2306 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2307 *
2308 * @returns Strict VBox status code.
2309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2310 * @param pu32 Where to return the opcode dword.
2311 */
2312DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2313{
2314 uint8_t u8;
2315 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2316 if (rcStrict == VINF_SUCCESS)
2317 *pu32 = (int8_t)u8;
2318 return rcStrict;
2319}
2320
2321
2322/**
2323 * Fetches the next signed byte from the opcode stream, extending it to
2324 * unsigned 32-bit.
2325 *
2326 * @returns Strict VBox status code.
2327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2328 * @param pu32 Where to return the unsigned dword.
2329 */
2330DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2331{
2332 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2333 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2334 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2335
2336 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2337 pVCpu->iem.s.offOpcode = offOpcode + 1;
2338 return VINF_SUCCESS;
2339}
2340
2341#endif /* !IEM_WITH_SETJMP */
2342
2343/**
2344 * Fetches the next signed byte from the opcode stream, sign-extending it to
2345 * a double word and returning automatically on failure.
2346 *
2347 * @param a_pu32 Where to return the double word.
2348 * @remark Implicitly references pVCpu.
2349 */
2350#ifndef IEM_WITH_SETJMP
2351# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2352 do \
2353 { \
2354 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2355 if (rcStrict2 != VINF_SUCCESS) \
2356 return rcStrict2; \
2357 } while (0)
2358#else
2359# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2360#endif
2361
2362#ifndef IEM_WITH_SETJMP
2363
2364/**
2365 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2366 *
2367 * @returns Strict VBox status code.
2368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2369 * @param pu64 Where to return the opcode qword.
2370 */
2371DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2372{
2373 uint8_t u8;
2374 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2375 if (rcStrict == VINF_SUCCESS)
2376 *pu64 = (int8_t)u8;
2377 return rcStrict;
2378}
2379
2380
2381/**
2382 * Fetches the next signed byte from the opcode stream, extending it to
2383 * unsigned 64-bit.
2384 *
2385 * @returns Strict VBox status code.
2386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2387 * @param pu64 Where to return the unsigned qword.
2388 */
2389DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2390{
2391 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2392 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2393 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2394
2395 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2396 pVCpu->iem.s.offOpcode = offOpcode + 1;
2397 return VINF_SUCCESS;
2398}
2399
2400#endif /* !IEM_WITH_SETJMP */
2401
2402
2403/**
2404 * Fetches the next signed byte from the opcode stream, sign-extending it to
2405 * a quad word and returning automatically on failure.
2406 *
2407 * @param a_pu64 Where to return the quad word.
2408 * @remark Implicitly references pVCpu.
2409 */
2410#ifndef IEM_WITH_SETJMP
2411# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2412 do \
2413 { \
2414 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2415 if (rcStrict2 != VINF_SUCCESS) \
2416 return rcStrict2; \
2417 } while (0)
2418#else
2419# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2420#endif
2421
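/*
 * Illustrative note: all the S8_SX_* variants rely on the (int8_t) cast to do
 * the sign extension before the value is stored in the wider unsigned type:
 *
 * @code
 *      uint8_t  const b   = 0x80;          // -128 as a signed byte
 *      uint64_t const uSx = (int8_t)b;     // 0xffffffffffffff80 (sign extended)
 *      uint64_t const uZx = b;             // 0x0000000000000080 (zero extended)
 * @endcode
 */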
2422
2423#ifndef IEM_WITH_SETJMP
2424
2425/**
2426 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2427 *
2428 * @returns Strict VBox status code.
2429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2430 * @param pu16 Where to return the opcode word.
2431 */
2432DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2433{
2434 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2435 if (rcStrict == VINF_SUCCESS)
2436 {
2437 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2438# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2439 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2440# else
2441 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2442# endif
2443 pVCpu->iem.s.offOpcode = offOpcode + 2;
2444 }
2445 else
2446 *pu16 = 0;
2447 return rcStrict;
2448}
2449
2450
2451/**
2452 * Fetches the next opcode word.
2453 *
2454 * @returns Strict VBox status code.
2455 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2456 * @param pu16 Where to return the opcode word.
2457 */
2458DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2459{
2460 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2461 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2462 {
2463 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2464# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2465 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2466# else
2467 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2468# endif
2469 return VINF_SUCCESS;
2470 }
2471 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2472}
2473
2474#else /* IEM_WITH_SETJMP */
2475
2476/**
2477 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2478 *
2479 * @returns The opcode word.
2480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2481 */
2482DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2483{
2484# ifdef IEM_WITH_CODE_TLB
2485 uint16_t u16;
2486 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2487 return u16;
2488# else
2489 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2490 if (rcStrict == VINF_SUCCESS)
2491 {
2492 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2493 pVCpu->iem.s.offOpcode += 2;
2494# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2495 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2496# else
2497 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2498# endif
2499 }
2500 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2501# endif
2502}
2503
2504
2505/**
2506 * Fetches the next opcode word, longjmp on error.
2507 *
2508 * @returns The opcode word.
2509 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2510 */
2511DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2512{
2513# ifdef IEM_WITH_CODE_TLB
2514 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2515 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2516 if (RT_LIKELY( pbBuf != NULL
2517 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2518 {
2519 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2520# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2521 return *(uint16_t const *)&pbBuf[offBuf];
2522# else
2523 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2524# endif
2525 }
2526# else
2527 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2528 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2529 {
2530 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2531# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2532 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2533# else
2534 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2535# endif
2536 }
2537# endif
2538 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2539}
2540
2541#endif /* IEM_WITH_SETJMP */
2542
2543
2544/**
2545 * Fetches the next opcode word, returns automatically on failure.
2546 *
2547 * @param a_pu16 Where to return the opcode word.
2548 * @remark Implicitly references pVCpu.
2549 */
2550#ifndef IEM_WITH_SETJMP
2551# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2552 do \
2553 { \
2554 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2555 if (rcStrict2 != VINF_SUCCESS) \
2556 return rcStrict2; \
2557 } while (0)
2558#else
2559# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2560#endif
2561
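/*
 * Illustrative note: the opcode stream is little endian, so the non-unaligned
 * paths above assemble multi-byte values explicitly, low byte first.  For a
 * 16-bit fetch that boils down to:
 *
 * @code
 *      // equivalent of RT_MAKE_U16(ab[0], ab[1]) for ab[] = { 0x34, 0x12 }
 *      uint16_t const u16 = (uint16_t)(0x34 | (0x12 << 8));   // 0x1234
 * @endcode
 *
 * The IEM_USE_UNALIGNED_DATA_ACCESS path instead does a single, possibly
 * unaligned, 16-bit load, which is only enabled on hosts where that is safe.
 */
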
2562#ifndef IEM_WITH_SETJMP
2563
2564/**
2565 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2566 *
2567 * @returns Strict VBox status code.
2568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2569 * @param pu32 Where to return the opcode double word.
2570 */
2571DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2572{
2573 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2574 if (rcStrict == VINF_SUCCESS)
2575 {
2576 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2577 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2578 pVCpu->iem.s.offOpcode = offOpcode + 2;
2579 }
2580 else
2581 *pu32 = 0;
2582 return rcStrict;
2583}
2584
2585
2586/**
2587 * Fetches the next opcode word, zero extending it to a double word.
2588 *
2589 * @returns Strict VBox status code.
2590 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2591 * @param pu32 Where to return the opcode double word.
2592 */
2593DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2594{
2595 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2596 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2597 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2598
2599 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2600 pVCpu->iem.s.offOpcode = offOpcode + 2;
2601 return VINF_SUCCESS;
2602}
2603
2604#endif /* !IEM_WITH_SETJMP */
2605
2606
2607/**
2608 * Fetches the next opcode word and zero extends it to a double word, returns
2609 * automatically on failure.
2610 *
2611 * @param a_pu32 Where to return the opcode double word.
2612 * @remark Implicitly references pVCpu.
2613 */
2614#ifndef IEM_WITH_SETJMP
2615# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2616 do \
2617 { \
2618 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2619 if (rcStrict2 != VINF_SUCCESS) \
2620 return rcStrict2; \
2621 } while (0)
2622#else
2623# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2624#endif
2625
2626#ifndef IEM_WITH_SETJMP
2627
2628/**
2629 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2630 *
2631 * @returns Strict VBox status code.
2632 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2633 * @param pu64 Where to return the opcode quad word.
2634 */
2635DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2636{
2637 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2638 if (rcStrict == VINF_SUCCESS)
2639 {
2640 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2641 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2642 pVCpu->iem.s.offOpcode = offOpcode + 2;
2643 }
2644 else
2645 *pu64 = 0;
2646 return rcStrict;
2647}
2648
2649
2650/**
2651 * Fetches the next opcode word, zero extending it to a quad word.
2652 *
2653 * @returns Strict VBox status code.
2654 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2655 * @param pu64 Where to return the opcode quad word.
2656 */
2657DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2658{
2659 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2660 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2661 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2662
2663 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2664 pVCpu->iem.s.offOpcode = offOpcode + 2;
2665 return VINF_SUCCESS;
2666}
2667
2668#endif /* !IEM_WITH_SETJMP */
2669
2670/**
2671 * Fetches the next opcode word and zero extends it to a quad word, returns
2672 * automatically on failure.
2673 *
2674 * @param a_pu64 Where to return the opcode quad word.
2675 * @remark Implicitly references pVCpu.
2676 */
2677#ifndef IEM_WITH_SETJMP
2678# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2679 do \
2680 { \
2681 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2682 if (rcStrict2 != VINF_SUCCESS) \
2683 return rcStrict2; \
2684 } while (0)
2685#else
2686# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2687#endif
2688
2689
2690#ifndef IEM_WITH_SETJMP
2691/**
2692 * Fetches the next signed word from the opcode stream.
2693 *
2694 * @returns Strict VBox status code.
2695 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2696 * @param pi16 Where to return the signed word.
2697 */
2698DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2699{
2700 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2701}
2702#endif /* !IEM_WITH_SETJMP */
2703
2704
2705/**
2706 * Fetches the next signed word from the opcode stream, returning automatically
2707 * on failure.
2708 *
2709 * @param a_pi16 Where to return the signed word.
2710 * @remark Implicitly references pVCpu.
2711 */
2712#ifndef IEM_WITH_SETJMP
2713# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2714 do \
2715 { \
2716 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2717 if (rcStrict2 != VINF_SUCCESS) \
2718 return rcStrict2; \
2719 } while (0)
2720#else
2721# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2722#endif
2723
2724#ifndef IEM_WITH_SETJMP
2725
2726/**
2727 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2728 *
2729 * @returns Strict VBox status code.
2730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2731 * @param pu32 Where to return the opcode dword.
2732 */
2733DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2734{
2735 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2736 if (rcStrict == VINF_SUCCESS)
2737 {
2738 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2739# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2740 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2741# else
2742 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2743 pVCpu->iem.s.abOpcode[offOpcode + 1],
2744 pVCpu->iem.s.abOpcode[offOpcode + 2],
2745 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2746# endif
2747 pVCpu->iem.s.offOpcode = offOpcode + 4;
2748 }
2749 else
2750 *pu32 = 0;
2751 return rcStrict;
2752}
2753
2754
2755/**
2756 * Fetches the next opcode dword.
2757 *
2758 * @returns Strict VBox status code.
2759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2760 * @param pu32 Where to return the opcode double word.
2761 */
2762DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2763{
2764 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2765 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2766 {
2767 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2768# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2769 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2770# else
2771 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2772 pVCpu->iem.s.abOpcode[offOpcode + 1],
2773 pVCpu->iem.s.abOpcode[offOpcode + 2],
2774 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2775# endif
2776 return VINF_SUCCESS;
2777 }
2778 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2779}
2780
2781#else /* IEM_WITH_SETJMP */
2782
2783/**
2784 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2785 *
2786 * @returns The opcode dword.
2787 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2788 */
2789DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2790{
2791# ifdef IEM_WITH_CODE_TLB
2792 uint32_t u32;
2793 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2794 return u32;
2795# else
2796 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2797 if (rcStrict == VINF_SUCCESS)
2798 {
2799 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2800 pVCpu->iem.s.offOpcode = offOpcode + 4;
2801# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2802 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2803# else
2804 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2805 pVCpu->iem.s.abOpcode[offOpcode + 1],
2806 pVCpu->iem.s.abOpcode[offOpcode + 2],
2807 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2808# endif
2809 }
2810 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2811# endif
2812}
2813
2814
2815/**
2816 * Fetches the next opcode dword, longjmp on error.
2817 *
2818 * @returns The opcode dword.
2819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2820 */
2821DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2822{
2823# ifdef IEM_WITH_CODE_TLB
2824 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2825 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2826 if (RT_LIKELY( pbBuf != NULL
2827 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2828 {
2829 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2830# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2831 return *(uint32_t const *)&pbBuf[offBuf];
2832# else
2833 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2834 pbBuf[offBuf + 1],
2835 pbBuf[offBuf + 2],
2836 pbBuf[offBuf + 3]);
2837# endif
2838 }
2839# else
2840 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2841 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2842 {
2843 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2844# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2845 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2846# else
2847 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2848 pVCpu->iem.s.abOpcode[offOpcode + 1],
2849 pVCpu->iem.s.abOpcode[offOpcode + 2],
2850 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2851# endif
2852 }
2853# endif
2854 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2855}
2856
2857#endif /* IEM_WITH_SETJMP */
2858
2859
2860/**
2861 * Fetches the next opcode dword, returns automatically on failure.
2862 *
2863 * @param a_pu32 Where to return the opcode dword.
2864 * @remark Implicitly references pVCpu.
2865 */
2866#ifndef IEM_WITH_SETJMP
2867# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2868 do \
2869 { \
2870 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2871 if (rcStrict2 != VINF_SUCCESS) \
2872 return rcStrict2; \
2873 } while (0)
2874#else
2875# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2876#endif
2877
2878#ifndef IEM_WITH_SETJMP
2879
2880/**
2881 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2882 *
2883 * @returns Strict VBox status code.
2884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2885 * @param pu64 Where to return the opcode quad word.
2886 */
2887DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2888{
2889 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2890 if (rcStrict == VINF_SUCCESS)
2891 {
2892 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2893 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2894 pVCpu->iem.s.abOpcode[offOpcode + 1],
2895 pVCpu->iem.s.abOpcode[offOpcode + 2],
2896 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2897 pVCpu->iem.s.offOpcode = offOpcode + 4;
2898 }
2899 else
2900 *pu64 = 0;
2901 return rcStrict;
2902}
2903
2904
2905/**
2906 * Fetches the next opcode dword, zero extending it to a quad word.
2907 *
2908 * @returns Strict VBox status code.
2909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2910 * @param pu64 Where to return the opcode quad word.
2911 */
2912DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2913{
2914 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2915 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2916 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2917
2918 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2919 pVCpu->iem.s.abOpcode[offOpcode + 1],
2920 pVCpu->iem.s.abOpcode[offOpcode + 2],
2921 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2922 pVCpu->iem.s.offOpcode = offOpcode + 4;
2923 return VINF_SUCCESS;
2924}
2925
2926#endif /* !IEM_WITH_SETJMP */
2927
2928
2929/**
2930 * Fetches the next opcode dword and zero extends it to a quad word, returns
2931 * automatically on failure.
2932 *
2933 * @param a_pu64 Where to return the opcode quad word.
2934 * @remark Implicitly references pVCpu.
2935 */
2936#ifndef IEM_WITH_SETJMP
2937# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2938 do \
2939 { \
2940 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2941 if (rcStrict2 != VINF_SUCCESS) \
2942 return rcStrict2; \
2943 } while (0)
2944#else
2945# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2946#endif
2947
2948
2949#ifndef IEM_WITH_SETJMP
2950/**
2951 * Fetches the next signed double word from the opcode stream.
2952 *
2953 * @returns Strict VBox status code.
2954 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2955 * @param pi32 Where to return the signed double word.
2956 */
2957DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2958{
2959 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2960}
2961#endif
2962
2963/**
2964 * Fetches the next signed double word from the opcode stream, returning
2965 * automatically on failure.
2966 *
2967 * @param a_pi32 Where to return the signed double word.
2968 * @remark Implicitly references pVCpu.
2969 */
2970#ifndef IEM_WITH_SETJMP
2971# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2972 do \
2973 { \
2974 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2975 if (rcStrict2 != VINF_SUCCESS) \
2976 return rcStrict2; \
2977 } while (0)
2978#else
2979# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2980#endif
2981
2982#ifndef IEM_WITH_SETJMP
2983
2984/**
2985 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2986 *
2987 * @returns Strict VBox status code.
2988 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2989 * @param pu64 Where to return the opcode qword.
2990 */
2991DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2992{
2993 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2994 if (rcStrict == VINF_SUCCESS)
2995 {
2996 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2997 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2998 pVCpu->iem.s.abOpcode[offOpcode + 1],
2999 pVCpu->iem.s.abOpcode[offOpcode + 2],
3000 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3001 pVCpu->iem.s.offOpcode = offOpcode + 4;
3002 }
3003 else
3004 *pu64 = 0;
3005 return rcStrict;
3006}
3007
3008
3009/**
3010 * Fetches the next opcode dword, sign extending it into a quad word.
3011 *
3012 * @returns Strict VBox status code.
3013 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3014 * @param pu64 Where to return the opcode quad word.
3015 */
3016DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3017{
3018 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3019 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3020 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3021
3022 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3023 pVCpu->iem.s.abOpcode[offOpcode + 1],
3024 pVCpu->iem.s.abOpcode[offOpcode + 2],
3025 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3026 *pu64 = i32;
3027 pVCpu->iem.s.offOpcode = offOpcode + 4;
3028 return VINF_SUCCESS;
3029}
3030
3031#endif /* !IEM_WITH_SETJMP */
3032
3033
3034/**
3035 * Fetches the next opcode double word and sign extends it to a quad word,
3036 * returns automatically on failure.
3037 *
3038 * @param a_pu64 Where to return the opcode quad word.
3039 * @remark Implicitly references pVCpu.
3040 */
3041#ifndef IEM_WITH_SETJMP
3042# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3043 do \
3044 { \
3045 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3046 if (rcStrict2 != VINF_SUCCESS) \
3047 return rcStrict2; \
3048 } while (0)
3049#else
3050# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3051#endif
3052
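/*
 * Illustrative note: the U32_ZX_U64 and S32_SX_U64 flavours differ only in how
 * the upper 32 bits are filled, which matters in 64-bit mode where 32-bit
 * immediates and displacements are sign extended:
 *
 * @code
 *      uint32_t const u32 = UINT32_C(0x80000000);
 *      uint64_t const uZx = u32;           // 0x0000000080000000
 *      uint64_t const uSx = (int32_t)u32;  // 0xffffffff80000000
 * @endcode
 */
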
3053#ifndef IEM_WITH_SETJMP
3054
3055/**
3056 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3057 *
3058 * @returns Strict VBox status code.
3059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3060 * @param pu64 Where to return the opcode qword.
3061 */
3062DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3063{
3064 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3065 if (rcStrict == VINF_SUCCESS)
3066 {
3067 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3068# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3069 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3070# else
3071 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3072 pVCpu->iem.s.abOpcode[offOpcode + 1],
3073 pVCpu->iem.s.abOpcode[offOpcode + 2],
3074 pVCpu->iem.s.abOpcode[offOpcode + 3],
3075 pVCpu->iem.s.abOpcode[offOpcode + 4],
3076 pVCpu->iem.s.abOpcode[offOpcode + 5],
3077 pVCpu->iem.s.abOpcode[offOpcode + 6],
3078 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3079# endif
3080 pVCpu->iem.s.offOpcode = offOpcode + 8;
3081 }
3082 else
3083 *pu64 = 0;
3084 return rcStrict;
3085}
3086
3087
3088/**
3089 * Fetches the next opcode qword.
3090 *
3091 * @returns Strict VBox status code.
3092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3093 * @param pu64 Where to return the opcode qword.
3094 */
3095DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3096{
3097 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3098 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3099 {
3100# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3101 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3102# else
3103 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3104 pVCpu->iem.s.abOpcode[offOpcode + 1],
3105 pVCpu->iem.s.abOpcode[offOpcode + 2],
3106 pVCpu->iem.s.abOpcode[offOpcode + 3],
3107 pVCpu->iem.s.abOpcode[offOpcode + 4],
3108 pVCpu->iem.s.abOpcode[offOpcode + 5],
3109 pVCpu->iem.s.abOpcode[offOpcode + 6],
3110 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3111# endif
3112 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3113 return VINF_SUCCESS;
3114 }
3115 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3116}
3117
3118#else /* IEM_WITH_SETJMP */
3119
3120/**
3121 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3122 *
3123 * @returns The opcode qword.
3124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3125 */
3126DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3127{
3128# ifdef IEM_WITH_CODE_TLB
3129 uint64_t u64;
3130 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3131 return u64;
3132# else
3133 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3134 if (rcStrict == VINF_SUCCESS)
3135 {
3136 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3137 pVCpu->iem.s.offOpcode = offOpcode + 8;
3138# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3139 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3140# else
3141 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3142 pVCpu->iem.s.abOpcode[offOpcode + 1],
3143 pVCpu->iem.s.abOpcode[offOpcode + 2],
3144 pVCpu->iem.s.abOpcode[offOpcode + 3],
3145 pVCpu->iem.s.abOpcode[offOpcode + 4],
3146 pVCpu->iem.s.abOpcode[offOpcode + 5],
3147 pVCpu->iem.s.abOpcode[offOpcode + 6],
3148 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3149# endif
3150 }
3151 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3152# endif
3153}
3154
3155
3156/**
3157 * Fetches the next opcode qword, longjmp on error.
3158 *
3159 * @returns The opcode qword.
3160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3161 */
3162DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3163{
3164# ifdef IEM_WITH_CODE_TLB
3165 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3166 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3167 if (RT_LIKELY( pbBuf != NULL
3168 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3169 {
3170 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3171# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3172 return *(uint64_t const *)&pbBuf[offBuf];
3173# else
3174 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3175 pbBuf[offBuf + 1],
3176 pbBuf[offBuf + 2],
3177 pbBuf[offBuf + 3],
3178 pbBuf[offBuf + 4],
3179 pbBuf[offBuf + 5],
3180 pbBuf[offBuf + 6],
3181 pbBuf[offBuf + 7]);
3182# endif
3183 }
3184# else
3185 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3186 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3187 {
3188 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3189# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3190 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3191# else
3192 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3193 pVCpu->iem.s.abOpcode[offOpcode + 1],
3194 pVCpu->iem.s.abOpcode[offOpcode + 2],
3195 pVCpu->iem.s.abOpcode[offOpcode + 3],
3196 pVCpu->iem.s.abOpcode[offOpcode + 4],
3197 pVCpu->iem.s.abOpcode[offOpcode + 5],
3198 pVCpu->iem.s.abOpcode[offOpcode + 6],
3199 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3200# endif
3201 }
3202# endif
3203 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3204}
3205
3206#endif /* IEM_WITH_SETJMP */
3207
3208/**
3209 * Fetches the next opcode quad word, returns automatically on failure.
3210 *
3211 * @param a_pu64 Where to return the opcode quad word.
3212 * @remark Implicitly references pVCpu.
3213 */
3214#ifndef IEM_WITH_SETJMP
3215# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3216 do \
3217 { \
3218 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3219 if (rcStrict2 != VINF_SUCCESS) \
3220 return rcStrict2; \
3221 } while (0)
3222#else
3223# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3224#endif
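
/*
 * Illustrative sketch (not part of the decoder): in an opcode handler where pVCpu
 * is in scope, the IEM_OPCODE_GET_NEXT_XXX macros above either advance the opcode
 * stream or leave the caller on a fetch failure (return of the strict status code,
 * or longjmp in the IEM_WITH_SETJMP build).  The local u64Imm is hypothetical:
 *
 *      uint64_t u64Imm;
 *      IEM_OPCODE_GET_NEXT_U64(&u64Imm);   // bails out of the caller on failure
 *      // ... decode / execute using u64Imm ...
 */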
3225
3226
3227/** @name Misc Worker Functions.
3228 * @{
3229 */
3230
3231/**
3232 * Gets the exception class for the specified exception vector.
3233 *
3234 * @returns The class of the specified exception.
3235 * @param uVector The exception vector.
3236 */
3237IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3238{
3239 Assert(uVector <= X86_XCPT_LAST);
3240 switch (uVector)
3241 {
3242 case X86_XCPT_DE:
3243 case X86_XCPT_TS:
3244 case X86_XCPT_NP:
3245 case X86_XCPT_SS:
3246 case X86_XCPT_GP:
3247 case X86_XCPT_SX: /* AMD only */
3248 return IEMXCPTCLASS_CONTRIBUTORY;
3249
3250 case X86_XCPT_PF:
3251 case X86_XCPT_VE: /* Intel only */
3252 return IEMXCPTCLASS_PAGE_FAULT;
3253
3254 case X86_XCPT_DF:
3255 return IEMXCPTCLASS_DOUBLE_FAULT;
3256 }
3257 return IEMXCPTCLASS_BENIGN;
3258}
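
/*
 * Example classifications, following directly from the switch above: #GP, #TS and
 * #NP are IEMXCPTCLASS_CONTRIBUTORY, #PF is IEMXCPTCLASS_PAGE_FAULT, and vectors
 * not listed (e.g. #DB, NMI, #UD) fall back to IEMXCPTCLASS_BENIGN.
 */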
3259
3260
3261/**
3262 * Evaluates how to handle an exception caused during delivery of another event
3263 * (exception / interrupt).
3264 *
3265 * @returns How to handle the recursive exception.
3266 * @param pVCpu The cross context virtual CPU structure of the
3267 * calling thread.
3268 * @param fPrevFlags The flags of the previous event.
3269 * @param uPrevVector The vector of the previous event.
3270 * @param fCurFlags The flags of the current exception.
3271 * @param uCurVector The vector of the current exception.
3272 * @param pfXcptRaiseInfo Where to store additional information about the
3273 * exception condition. Optional.
3274 */
3275VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3276 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3277{
3278 /*
3279 * Only CPU exceptions can be raised while delivering other events; software interrupt
3280 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3281 */
3282 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3283 Assert(pVCpu); RT_NOREF(pVCpu);
3284 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3285
3286 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3287 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3288 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3289 {
3290 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3291 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3292 {
3293 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3294 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3295 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3296 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3297 {
3298 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3299 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3300 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3301 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3302 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3303 }
3304 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3305 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3306 {
3307 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3308 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3309 }
3310 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3311 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3312 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3313 {
3314 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3315 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3316 }
3317 }
3318 else
3319 {
3320 if (uPrevVector == X86_XCPT_NMI)
3321 {
3322 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3323 if (uCurVector == X86_XCPT_PF)
3324 {
3325 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3326 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3327 }
3328 }
3329 else if ( uPrevVector == X86_XCPT_AC
3330 && uCurVector == X86_XCPT_AC)
3331 {
3332 enmRaise = IEMXCPTRAISE_CPU_HANG;
3333 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3334 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3335 }
3336 }
3337 }
3338 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3339 {
3340 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3341 if (uCurVector == X86_XCPT_PF)
3342 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3343 }
3344 else
3345 {
3346 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3347 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3348 }
3349
3350 if (pfXcptRaiseInfo)
3351 *pfXcptRaiseInfo = fRaiseInfo;
3352 return enmRaise;
3353}
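
/*
 * Illustrative sketch (not part of IEM, flags simplified): a #GP raised while
 * delivering a #PF resolves to a double fault per the table logic above:
 *
 *      IEMXCPTRAISEINFO fInfo;
 *      IEMXCPTRAISE enmRet = IEMEvaluateRecursiveXcpt(pVCpu, IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                     IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP, &fInfo);
 *      // enmRet == IEMXCPTRAISE_DOUBLE_FAULT, fInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT
 */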
3354
3355
3356/**
3357 * Enters the CPU shutdown state initiated by a triple fault or other
3358 * unrecoverable conditions.
3359 *
3360 * @returns Strict VBox status code.
3361 * @param pVCpu The cross context virtual CPU structure of the
3362 * calling thread.
3363 */
3364IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3365{
3366 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3367 {
3368 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3369 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3370 }
3371
3372 RT_NOREF(pVCpu);
3373 return VINF_EM_TRIPLE_FAULT;
3374}
3375
3376
3377/**
3378 * Validates a new SS segment.
3379 *
3380 * @returns VBox strict status code.
3381 * @param pVCpu The cross context virtual CPU structure of the
3382 * calling thread.
3383 * @param pCtx The CPU context.
3384 * @param NewSS The new SS selector.
3385 * @param uCpl The CPL to load the stack for.
3386 * @param pDesc Where to return the descriptor.
3387 */
3388IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3389{
3390 NOREF(pCtx);
3391
3392 /* Null selectors are not allowed (we're not called for dispatching
3393 interrupts with SS=0 in long mode). */
3394 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3395 {
3396 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3397 return iemRaiseTaskSwitchFault0(pVCpu);
3398 }
3399
3400 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3401 if ((NewSS & X86_SEL_RPL) != uCpl)
3402 {
3403 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
3404 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3405 }
3406
3407 /*
3408 * Read the descriptor.
3409 */
3410 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3411 if (rcStrict != VINF_SUCCESS)
3412 return rcStrict;
3413
3414 /*
3415 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3416 */
3417 if (!pDesc->Legacy.Gen.u1DescType)
3418 {
3419 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3420 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3421 }
3422
3423 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3424 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3425 {
3426 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3427 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3428 }
3429 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3430 {
3431 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3432 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3433 }
3434
3435 /* Is it there? */
3436 /** @todo testcase: Is this checked before the canonical / limit check below? */
3437 if (!pDesc->Legacy.Gen.u1Present)
3438 {
3439 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3440 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3441 }
3442
3443 return VINF_SUCCESS;
3444}
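
/*
 * Illustrative sketch (not part of IEM): a stack-load path would typically validate
 * the selector first and only commit the descriptor afterwards (NewSS and uCpl here
 * stand in for the caller's own values):
 *
 *      IEMSELDESC DescSS;
 *      VBOXSTRICTRC rcStrict2 = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uCpl, &DescSS);
 *      if (rcStrict2 != VINF_SUCCESS)
 *          return rcStrict2;
 *      // ... mark the descriptor accessed and load it into pCtx->ss ...
 */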
3445
3446
3447/**
3448 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3449 * not.
3450 *
3451 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3452 * @param a_pCtx The CPU context.
3453 */
3454#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3455# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3456 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3457 ? (a_pCtx)->eflags.u \
3458 : CPUMRawGetEFlags(a_pVCpu) )
3459#else
3460# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3461 ( (a_pCtx)->eflags.u )
3462#endif
3463
3464/**
3465 * Updates the EFLAGS in the correct manner wrt. PATM.
3466 *
3467 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3468 * @param a_pCtx The CPU context.
3469 * @param a_fEfl The new EFLAGS.
3470 */
3471#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3472# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3473 do { \
3474 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3475 (a_pCtx)->eflags.u = (a_fEfl); \
3476 else \
3477 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3478 } while (0)
3479#else
3480# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3481 do { \
3482 (a_pCtx)->eflags.u = (a_fEfl); \
3483 } while (0)
3484#endif
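
/*
 * Illustrative sketch (not part of IEM): EFLAGS read-modify-write should go through
 * the wrappers above so that raw-mode/PATM sees the change, e.g. masking IF:
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;
 *      IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 *
 * This is the same pattern the real-mode interrupt code below uses.
 */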
3485
3486
3487/** @} */
3488
3489/** @name Raising Exceptions.
3490 *
3491 * @{
3492 */
3493
3494
3495/**
3496 * Loads the specified stack far pointer from the TSS.
3497 *
3498 * @returns VBox strict status code.
3499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3500 * @param pCtx The CPU context.
3501 * @param uCpl The CPL to load the stack for.
3502 * @param pSelSS Where to return the new stack segment.
3503 * @param puEsp Where to return the new stack pointer.
3504 */
3505IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3506 PRTSEL pSelSS, uint32_t *puEsp)
3507{
3508 VBOXSTRICTRC rcStrict;
3509 Assert(uCpl < 4);
3510
3511 switch (pCtx->tr.Attr.n.u4Type)
3512 {
3513 /*
3514 * 16-bit TSS (X86TSS16).
3515 */
3516 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3517 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3518 {
3519 uint32_t off = uCpl * 4 + 2;
3520 if (off + 4 <= pCtx->tr.u32Limit)
3521 {
3522 /** @todo check actual access pattern here. */
3523 uint32_t u32Tmp = 0; /* gcc maybe... */
3524 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3525 if (rcStrict == VINF_SUCCESS)
3526 {
3527 *puEsp = RT_LOWORD(u32Tmp);
3528 *pSelSS = RT_HIWORD(u32Tmp);
3529 return VINF_SUCCESS;
3530 }
3531 }
3532 else
3533 {
3534 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3535 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3536 }
3537 break;
3538 }
3539
3540 /*
3541 * 32-bit TSS (X86TSS32).
3542 */
3543 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3544 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3545 {
3546 uint32_t off = uCpl * 8 + 4;
3547 if (off + 7 <= pCtx->tr.u32Limit)
3548 {
3549 /** @todo check actual access pattern here. */
3550 uint64_t u64Tmp;
3551 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3552 if (rcStrict == VINF_SUCCESS)
3553 {
3554 *puEsp = u64Tmp & UINT32_MAX;
3555 *pSelSS = (RTSEL)(u64Tmp >> 32);
3556 return VINF_SUCCESS;
3557 }
3558 }
3559 else
3560 {
3561 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3562 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3563 }
3564 break;
3565 }
3566
3567 default:
3568 AssertFailed();
3569 rcStrict = VERR_IEM_IPE_4;
3570 break;
3571 }
3572
3573 *puEsp = 0; /* make gcc happy */
3574 *pSelSS = 0; /* make gcc happy */
3575 return rcStrict;
3576}
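
/*
 * Worked example for the offset math above (architectural TSS layout, for
 * illustration only): in a 32-bit TSS, uCpl=1 gives off = 1*8 + 4 = 12, i.e. the
 * ESP1 field, and the fetched qword covers ESP1 (low dword) and SS1 (next word).
 * In a 16-bit TSS, uCpl=1 gives off = 1*4 + 2 = 6, i.e. SP1 at offset 6 followed
 * by SS1 at offset 8.
 */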
3577
3578
3579/**
3580 * Loads the specified stack pointer from the 64-bit TSS.
3581 *
3582 * @returns VBox strict status code.
3583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3584 * @param pCtx The CPU context.
3585 * @param uCpl The CPL to load the stack for.
3586 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3587 * @param puRsp Where to return the new stack pointer.
3588 */
3589IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3590{
3591 Assert(uCpl < 4);
3592 Assert(uIst < 8);
3593 *puRsp = 0; /* make gcc happy */
3594
3595 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3596
3597 uint32_t off;
3598 if (uIst)
3599 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3600 else
3601 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3602 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3603 {
3604 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3605 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3606 }
3607
3608 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3609}
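
/*
 * Worked example for the offset math above (illustration only): uIst=0 with uCpl=2
 * yields off = 2*8 + RT_OFFSETOF(X86TSS64, rsp0), i.e. the RSP2 field, while uIst=3
 * yields off = (3-1)*8 + RT_OFFSETOF(X86TSS64, ist1), i.e. the IST3 field.
 */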
3610
3611
3612/**
3613 * Adjust the CPU state according to the exception being raised.
3614 *
3615 * @param pCtx The CPU context.
3616 * @param u8Vector The exception that has been raised.
3617 */
3618DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3619{
3620 switch (u8Vector)
3621 {
3622 case X86_XCPT_DB:
3623 pCtx->dr[7] &= ~X86_DR7_GD;
3624 break;
3625 /** @todo Read the AMD and Intel exception reference... */
3626 }
3627}
3628
3629
3630/**
3631 * Implements exceptions and interrupts for real mode.
3632 *
3633 * @returns VBox strict status code.
3634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3635 * @param pCtx The CPU context.
3636 * @param cbInstr The number of bytes to offset rIP by in the return
3637 * address.
3638 * @param u8Vector The interrupt / exception vector number.
3639 * @param fFlags The flags.
3640 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3641 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3642 */
3643IEM_STATIC VBOXSTRICTRC
3644iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3645 PCPUMCTX pCtx,
3646 uint8_t cbInstr,
3647 uint8_t u8Vector,
3648 uint32_t fFlags,
3649 uint16_t uErr,
3650 uint64_t uCr2)
3651{
3652 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3653 NOREF(uErr); NOREF(uCr2);
3654
3655 /*
3656 * Read the IDT entry.
3657 */
3658 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3659 {
3660 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3661 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3662 }
3663 RTFAR16 Idte;
3664 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3665 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3666 return rcStrict;
3667
3668 /*
3669 * Push the stack frame.
3670 */
3671 uint16_t *pu16Frame;
3672 uint64_t uNewRsp;
3673 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3674 if (rcStrict != VINF_SUCCESS)
3675 return rcStrict;
3676
3677 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3678#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3679 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3680 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3681 fEfl |= UINT16_C(0xf000);
3682#endif
3683 pu16Frame[2] = (uint16_t)fEfl;
3684 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3685 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3686 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3687 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3688 return rcStrict;
3689
3690 /*
3691 * Load the vector address into cs:ip and make exception specific state
3692 * adjustments.
3693 */
3694 pCtx->cs.Sel = Idte.sel;
3695 pCtx->cs.ValidSel = Idte.sel;
3696 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3697 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3698 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3699 pCtx->rip = Idte.off;
3700 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3701 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3702
3703 /** @todo do we actually do this in real mode? */
3704 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3705 iemRaiseXcptAdjustState(pCtx, u8Vector);
3706
3707 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3708}
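
/*
 * Worked example (illustration only): with the real-mode IDTR base at 0, vector 0x10
 * fetches its IP:CS far pointer from the 4 bytes at linear address 0x40, and the code
 * above pushes FLAGS, CS and IP (pu16Frame[2], [1], [0]) before branching to it, with
 * IP advanced by cbInstr only for software interrupts.
 */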
3709
3710
3711/**
3712 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3713 *
3714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3715 * @param pSReg Pointer to the segment register.
3716 */
3717IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3718{
3719 pSReg->Sel = 0;
3720 pSReg->ValidSel = 0;
3721 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3722 {
3723 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3724 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3725 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3726 }
3727 else
3728 {
3729 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3730 /** @todo check this on AMD-V */
3731 pSReg->u64Base = 0;
3732 pSReg->u32Limit = 0;
3733 }
3734}
3735
3736
3737/**
3738 * Loads a segment selector during a task switch in V8086 mode.
3739 *
3740 * @param pSReg Pointer to the segment register.
3741 * @param uSel The selector value to load.
3742 */
3743IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3744{
3745 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3746 pSReg->Sel = uSel;
3747 pSReg->ValidSel = uSel;
3748 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3749 pSReg->u64Base = uSel << 4;
3750 pSReg->u32Limit = 0xffff;
3751 pSReg->Attr.u = 0xf3;
3752}
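
/*
 * Worked example (illustration only): loading uSel=0xb800 this way yields
 * u64Base=0xb8000, u32Limit=0xffff and Attr.u=0xf3 (present, DPL=3, accessed
 * read/write data), i.e. the real-mode style segment a V8086 task expects.
 */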
3753
3754
3755/**
3756 * Loads a NULL data selector into a selector register, both the hidden and
3757 * visible parts, in protected mode.
3758 *
3759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3760 * @param pSReg Pointer to the segment register.
3761 * @param uRpl The RPL.
3762 */
3763IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3764{
3765 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3766 * data selector in protected mode. */
3767 pSReg->Sel = uRpl;
3768 pSReg->ValidSel = uRpl;
3769 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3770 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3771 {
3772 /* VT-x (Intel 3960x) observed doing something like this. */
3773 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3774 pSReg->u32Limit = UINT32_MAX;
3775 pSReg->u64Base = 0;
3776 }
3777 else
3778 {
3779 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3780 pSReg->u32Limit = 0;
3781 pSReg->u64Base = 0;
3782 }
3783}
3784
3785
3786/**
3787 * Loads a segment selector during a task switch in protected mode.
3788 *
3789 * In this task switch scenario, we would throw \#TS exceptions rather than
3790 * \#GPs.
3791 *
3792 * @returns VBox strict status code.
3793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3794 * @param pSReg Pointer to the segment register.
3795 * @param uSel The new selector value.
3796 *
3797 * @remarks This does _not_ handle CS or SS.
3798 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3799 */
3800IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3801{
3802 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3803
3804 /* Null data selector. */
3805 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3806 {
3807 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3808 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3809 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3810 return VINF_SUCCESS;
3811 }
3812
3813 /* Fetch the descriptor. */
3814 IEMSELDESC Desc;
3815 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3816 if (rcStrict != VINF_SUCCESS)
3817 {
3818 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3819 VBOXSTRICTRC_VAL(rcStrict)));
3820 return rcStrict;
3821 }
3822
3823 /* Must be a data segment or readable code segment. */
3824 if ( !Desc.Legacy.Gen.u1DescType
3825 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3826 {
3827 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3828 Desc.Legacy.Gen.u4Type));
3829 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3830 }
3831
3832 /* Check privileges for data segments and non-conforming code segments. */
3833 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3834 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3835 {
3836 /* The RPL and the new CPL must be less than or equal to the DPL. */
3837 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3838 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3839 {
3840 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3841 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3842 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3843 }
3844 }
3845
3846 /* Is it there? */
3847 if (!Desc.Legacy.Gen.u1Present)
3848 {
3849 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3850 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3851 }
3852
3853 /* The base and limit. */
3854 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3855 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3856
3857 /*
3858 * Ok, everything checked out fine. Now set the accessed bit before
3859 * committing the result into the registers.
3860 */
3861 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3862 {
3863 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3864 if (rcStrict != VINF_SUCCESS)
3865 return rcStrict;
3866 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3867 }
3868
3869 /* Commit */
3870 pSReg->Sel = uSel;
3871 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3872 pSReg->u32Limit = cbLimit;
3873 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3874 pSReg->ValidSel = uSel;
3875 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3876 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3877 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3878
3879 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3880 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3881 return VINF_SUCCESS;
3882}
3883
3884
3885/**
3886 * Performs a task switch.
3887 *
3888 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3889 * caller is responsible for performing the necessary checks (like DPL, TSS
3890 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3891 * reference for JMP, CALL, IRET.
3892 *
3893 * If the task switch is due to a software interrupt or hardware exception,
3894 * the caller is responsible for validating the TSS selector and descriptor. See
3895 * Intel Instruction reference for INT n.
3896 *
3897 * @returns VBox strict status code.
3898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3899 * @param pCtx The CPU context.
3900 * @param enmTaskSwitch What caused this task switch.
3901 * @param uNextEip The EIP effective after the task switch.
3902 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3903 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3904 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3905 * @param SelTSS The TSS selector of the new task.
3906 * @param pNewDescTSS Pointer to the new TSS descriptor.
3907 */
3908IEM_STATIC VBOXSTRICTRC
3909iemTaskSwitch(PVMCPU pVCpu,
3910 PCPUMCTX pCtx,
3911 IEMTASKSWITCH enmTaskSwitch,
3912 uint32_t uNextEip,
3913 uint32_t fFlags,
3914 uint16_t uErr,
3915 uint64_t uCr2,
3916 RTSEL SelTSS,
3917 PIEMSELDESC pNewDescTSS)
3918{
3919 Assert(!IEM_IS_REAL_MODE(pVCpu));
3920 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3921
3922 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3923 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3924 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3925 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3926 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3927
3928 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3929 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3930
3931 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3932 fIsNewTSS386, pCtx->eip, uNextEip));
3933
3934 /* Update CR2 in case it's a page-fault. */
3935 /** @todo This should probably be done much earlier in IEM/PGM. See
3936 * @bugref{5653#c49}. */
3937 if (fFlags & IEM_XCPT_FLAGS_CR2)
3938 pCtx->cr2 = uCr2;
3939
3940 /*
3941 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3942 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3943 */
3944 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3945 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3946 if (uNewTSSLimit < uNewTSSLimitMin)
3947 {
3948 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3949 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3950 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3951 }
3952
3953 /*
3954 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3955 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3956 */
3957 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3958 {
3959 uint32_t const uExitInfo1 = SelTSS;
3960 uint32_t uExitInfo2 = uErr;
3961 switch (enmTaskSwitch)
3962 {
3963 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3964 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3965 default: break;
3966 }
3967 if (fFlags & IEM_XCPT_FLAGS_ERR)
3968 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3969 if (pCtx->eflags.Bits.u1RF)
3970 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3971
3972 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3973 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3974 RT_NOREF2(uExitInfo1, uExitInfo2);
3975 }
3976 /** @todo Nested-VMX task-switch intercept. */
3977
3978 /*
3979 * Check the current TSS limit. The last written byte to the current TSS during the
3980 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3981 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3982 *
3983 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3984 * end up with smaller than "legal" TSS limits.
3985 */
3986 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3987 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3988 if (uCurTSSLimit < uCurTSSLimitMin)
3989 {
3990 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3991 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3992 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3993 }
3994
3995 /*
3996 * Verify that the new TSS can be accessed and map it. Map only the required contents
3997 * and not the entire TSS.
3998 */
3999 void *pvNewTSS;
4000 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4001 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4002 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4003 /** @todo Handle the case where the TSS crosses a page boundary. Intel specifies that it may
4004 * not perform correct translation if this happens. See Intel spec. 7.2.1
4005 * "Task-State Segment" */
4006 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4007 if (rcStrict != VINF_SUCCESS)
4008 {
4009 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4010 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4011 return rcStrict;
4012 }
4013
4014 /*
4015 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4016 */
4017 uint32_t u32EFlags = pCtx->eflags.u32;
4018 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4019 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4020 {
4021 PX86DESC pDescCurTSS;
4022 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4023 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4024 if (rcStrict != VINF_SUCCESS)
4025 {
4026 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4027 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4028 return rcStrict;
4029 }
4030
4031 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4032 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4033 if (rcStrict != VINF_SUCCESS)
4034 {
4035 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4036 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4037 return rcStrict;
4038 }
4039
4040 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4041 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4042 {
4043 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4044 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4045 u32EFlags &= ~X86_EFL_NT;
4046 }
4047 }
4048
4049 /*
4050 * Save the CPU state into the current TSS.
4051 */
4052 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4053 if (GCPtrNewTSS == GCPtrCurTSS)
4054 {
4055 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4056 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4057 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4058 }
4059 if (fIsNewTSS386)
4060 {
4061 /*
4062 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4063 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4064 */
4065 void *pvCurTSS32;
4066 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4067 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4068 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4069 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4070 if (rcStrict != VINF_SUCCESS)
4071 {
4072 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4073 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4074 return rcStrict;
4075 }
4076
4077 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4078 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4079 pCurTSS32->eip = uNextEip;
4080 pCurTSS32->eflags = u32EFlags;
4081 pCurTSS32->eax = pCtx->eax;
4082 pCurTSS32->ecx = pCtx->ecx;
4083 pCurTSS32->edx = pCtx->edx;
4084 pCurTSS32->ebx = pCtx->ebx;
4085 pCurTSS32->esp = pCtx->esp;
4086 pCurTSS32->ebp = pCtx->ebp;
4087 pCurTSS32->esi = pCtx->esi;
4088 pCurTSS32->edi = pCtx->edi;
4089 pCurTSS32->es = pCtx->es.Sel;
4090 pCurTSS32->cs = pCtx->cs.Sel;
4091 pCurTSS32->ss = pCtx->ss.Sel;
4092 pCurTSS32->ds = pCtx->ds.Sel;
4093 pCurTSS32->fs = pCtx->fs.Sel;
4094 pCurTSS32->gs = pCtx->gs.Sel;
4095
4096 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4097 if (rcStrict != VINF_SUCCESS)
4098 {
4099 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4100 VBOXSTRICTRC_VAL(rcStrict)));
4101 return rcStrict;
4102 }
4103 }
4104 else
4105 {
4106 /*
4107 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4108 */
4109 void *pvCurTSS16;
4110 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4111 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4112 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4113 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4114 if (rcStrict != VINF_SUCCESS)
4115 {
4116 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4117 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4118 return rcStrict;
4119 }
4120
4121 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4122 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4123 pCurTSS16->ip = uNextEip;
4124 pCurTSS16->flags = u32EFlags;
4125 pCurTSS16->ax = pCtx->ax;
4126 pCurTSS16->cx = pCtx->cx;
4127 pCurTSS16->dx = pCtx->dx;
4128 pCurTSS16->bx = pCtx->bx;
4129 pCurTSS16->sp = pCtx->sp;
4130 pCurTSS16->bp = pCtx->bp;
4131 pCurTSS16->si = pCtx->si;
4132 pCurTSS16->di = pCtx->di;
4133 pCurTSS16->es = pCtx->es.Sel;
4134 pCurTSS16->cs = pCtx->cs.Sel;
4135 pCurTSS16->ss = pCtx->ss.Sel;
4136 pCurTSS16->ds = pCtx->ds.Sel;
4137
4138 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4139 if (rcStrict != VINF_SUCCESS)
4140 {
4141 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4142 VBOXSTRICTRC_VAL(rcStrict)));
4143 return rcStrict;
4144 }
4145 }
4146
4147 /*
4148 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4149 */
4150 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4151 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4152 {
4153 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4154 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4155 pNewTSS->selPrev = pCtx->tr.Sel;
4156 }
4157
4158 /*
4159 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
4160 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4161 */
4162 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4163 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4164 bool fNewDebugTrap;
4165 if (fIsNewTSS386)
4166 {
4167 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4168 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4169 uNewEip = pNewTSS32->eip;
4170 uNewEflags = pNewTSS32->eflags;
4171 uNewEax = pNewTSS32->eax;
4172 uNewEcx = pNewTSS32->ecx;
4173 uNewEdx = pNewTSS32->edx;
4174 uNewEbx = pNewTSS32->ebx;
4175 uNewEsp = pNewTSS32->esp;
4176 uNewEbp = pNewTSS32->ebp;
4177 uNewEsi = pNewTSS32->esi;
4178 uNewEdi = pNewTSS32->edi;
4179 uNewES = pNewTSS32->es;
4180 uNewCS = pNewTSS32->cs;
4181 uNewSS = pNewTSS32->ss;
4182 uNewDS = pNewTSS32->ds;
4183 uNewFS = pNewTSS32->fs;
4184 uNewGS = pNewTSS32->gs;
4185 uNewLdt = pNewTSS32->selLdt;
4186 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4187 }
4188 else
4189 {
4190 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4191 uNewCr3 = 0;
4192 uNewEip = pNewTSS16->ip;
4193 uNewEflags = pNewTSS16->flags;
4194 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4195 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4196 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4197 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4198 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4199 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4200 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4201 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4202 uNewES = pNewTSS16->es;
4203 uNewCS = pNewTSS16->cs;
4204 uNewSS = pNewTSS16->ss;
4205 uNewDS = pNewTSS16->ds;
4206 uNewFS = 0;
4207 uNewGS = 0;
4208 uNewLdt = pNewTSS16->selLdt;
4209 fNewDebugTrap = false;
4210 }
4211
4212 if (GCPtrNewTSS == GCPtrCurTSS)
4213 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4214 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4215
4216 /*
4217 * We're done accessing the new TSS.
4218 */
4219 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4220 if (rcStrict != VINF_SUCCESS)
4221 {
4222 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4223 return rcStrict;
4224 }
4225
4226 /*
4227 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4228 */
4229 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4230 {
4231 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4232 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4233 if (rcStrict != VINF_SUCCESS)
4234 {
4235 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4236 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4237 return rcStrict;
4238 }
4239
4240 /* Check that the descriptor indicates the new TSS is available (not busy). */
4241 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4242 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4243 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4244
4245 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4246 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4247 if (rcStrict != VINF_SUCCESS)
4248 {
4249 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4250 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4251 return rcStrict;
4252 }
4253 }
4254
4255 /*
4256 * From this point on, we're technically in the new task. Any exception raised from here on is
4257 * deferred until the task switch completes and is then delivered before the new task executes any instructions.
4258 */
4259 pCtx->tr.Sel = SelTSS;
4260 pCtx->tr.ValidSel = SelTSS;
4261 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4262 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4263 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4264 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4265 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4266
4267 /* Set the busy bit in TR. */
4268 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4269 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4270 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4271 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4272 {
4273 uNewEflags |= X86_EFL_NT;
4274 }
4275
4276 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4277 pCtx->cr0 |= X86_CR0_TS;
4278 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4279
4280 pCtx->eip = uNewEip;
4281 pCtx->eax = uNewEax;
4282 pCtx->ecx = uNewEcx;
4283 pCtx->edx = uNewEdx;
4284 pCtx->ebx = uNewEbx;
4285 pCtx->esp = uNewEsp;
4286 pCtx->ebp = uNewEbp;
4287 pCtx->esi = uNewEsi;
4288 pCtx->edi = uNewEdi;
4289
4290 uNewEflags &= X86_EFL_LIVE_MASK;
4291 uNewEflags |= X86_EFL_RA1_MASK;
4292 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4293
4294 /*
4295 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4296 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4297 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4298 */
4299 pCtx->es.Sel = uNewES;
4300 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4301
4302 pCtx->cs.Sel = uNewCS;
4303 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4304
4305 pCtx->ss.Sel = uNewSS;
4306 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4307
4308 pCtx->ds.Sel = uNewDS;
4309 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4310
4311 pCtx->fs.Sel = uNewFS;
4312 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4313
4314 pCtx->gs.Sel = uNewGS;
4315 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4316 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4317
4318 pCtx->ldtr.Sel = uNewLdt;
4319 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4320 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4321 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4322
4323 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4324 {
4325 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4326 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4327 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4328 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4329 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4330 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4331 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4332 }
4333
4334 /*
4335 * Switch CR3 for the new task.
4336 */
4337 if ( fIsNewTSS386
4338 && (pCtx->cr0 & X86_CR0_PG))
4339 {
4340 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4341 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4342 {
4343 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4344 AssertRCSuccessReturn(rc, rc);
4345 }
4346 else
4347 pCtx->cr3 = uNewCr3;
4348
4349 /* Inform PGM. */
4350 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4351 {
4352 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4353 AssertRCReturn(rc, rc);
4354 /* ignore informational status codes */
4355 }
4356 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4357 }
4358
4359 /*
4360 * Switch LDTR for the new task.
4361 */
4362 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4363 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4364 else
4365 {
4366 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4367
4368 IEMSELDESC DescNewLdt;
4369 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4370 if (rcStrict != VINF_SUCCESS)
4371 {
4372 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4373 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4374 return rcStrict;
4375 }
4376 if ( !DescNewLdt.Legacy.Gen.u1Present
4377 || DescNewLdt.Legacy.Gen.u1DescType
4378 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4379 {
4380 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4381 uNewLdt, DescNewLdt.Legacy.u));
4382 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4383 }
4384
4385 pCtx->ldtr.ValidSel = uNewLdt;
4386 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4387 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4388 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4389 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4390 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4391 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4392 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4393 }
4394
4395 IEMSELDESC DescSS;
4396 if (IEM_IS_V86_MODE(pVCpu))
4397 {
4398 pVCpu->iem.s.uCpl = 3;
4399 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4400 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4401 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4402 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4403 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4404 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4405
4406 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4407 DescSS.Legacy.u = 0;
4408 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4409 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4410 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4411 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4412 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4413 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4414 DescSS.Legacy.Gen.u2Dpl = 3;
4415 }
4416 else
4417 {
4418 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4419
4420 /*
4421 * Load the stack segment for the new task.
4422 */
4423 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4424 {
4425 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4426 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4427 }
4428
4429 /* Fetch the descriptor. */
4430 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4431 if (rcStrict != VINF_SUCCESS)
4432 {
4433 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4434 VBOXSTRICTRC_VAL(rcStrict)));
4435 return rcStrict;
4436 }
4437
4438 /* SS must be a data segment and writable. */
4439 if ( !DescSS.Legacy.Gen.u1DescType
4440 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4441 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4442 {
4443 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4444 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4445 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4446 }
4447
4448 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4449 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4450 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4451 {
4452 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4453 uNewCpl));
4454 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4455 }
4456
4457 /* Is it there? */
4458 if (!DescSS.Legacy.Gen.u1Present)
4459 {
4460 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4461 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4462 }
4463
4464 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4465 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4466
4467 /* Set the accessed bit before committing the result into SS. */
4468 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4469 {
4470 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4471 if (rcStrict != VINF_SUCCESS)
4472 return rcStrict;
4473 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4474 }
4475
4476 /* Commit SS. */
4477 pCtx->ss.Sel = uNewSS;
4478 pCtx->ss.ValidSel = uNewSS;
4479 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4480 pCtx->ss.u32Limit = cbLimit;
4481 pCtx->ss.u64Base = u64Base;
4482 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4483 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4484
4485 /* CPL has changed, update IEM before loading rest of segments. */
4486 pVCpu->iem.s.uCpl = uNewCpl;
4487
4488 /*
4489 * Load the data segments for the new task.
4490 */
4491 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4492 if (rcStrict != VINF_SUCCESS)
4493 return rcStrict;
4494 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4495 if (rcStrict != VINF_SUCCESS)
4496 return rcStrict;
4497 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4498 if (rcStrict != VINF_SUCCESS)
4499 return rcStrict;
4500 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4501 if (rcStrict != VINF_SUCCESS)
4502 return rcStrict;
4503
4504 /*
4505 * Load the code segment for the new task.
4506 */
4507 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4508 {
4509 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4510 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4511 }
4512
4513 /* Fetch the descriptor. */
4514 IEMSELDESC DescCS;
4515 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4516 if (rcStrict != VINF_SUCCESS)
4517 {
4518 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4519 return rcStrict;
4520 }
4521
4522 /* CS must be a code segment. */
4523 if ( !DescCS.Legacy.Gen.u1DescType
4524 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4525 {
4526 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4527 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4528 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4529 }
4530
4531 /* For conforming CS, DPL must be less than or equal to the RPL. */
4532 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4533 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4534 {
4535 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4536 DescCS.Legacy.Gen.u2Dpl));
4537 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4538 }
4539
4540 /* For non-conforming CS, DPL must match RPL. */
4541 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4542 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4543 {
4544 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4545 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4546 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4547 }
4548
4549 /* Is it there? */
4550 if (!DescCS.Legacy.Gen.u1Present)
4551 {
4552 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4553 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4554 }
4555
4556 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4557 u64Base = X86DESC_BASE(&DescCS.Legacy);
4558
4559 /* Set the accessed bit before committing the result into CS. */
4560 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4561 {
4562 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4563 if (rcStrict != VINF_SUCCESS)
4564 return rcStrict;
4565 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4566 }
4567
4568 /* Commit CS. */
4569 pCtx->cs.Sel = uNewCS;
4570 pCtx->cs.ValidSel = uNewCS;
4571 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4572 pCtx->cs.u32Limit = cbLimit;
4573 pCtx->cs.u64Base = u64Base;
4574 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4575 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4576 }
4577
4578 /** @todo Debug trap. */
4579 if (fIsNewTSS386 && fNewDebugTrap)
4580 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4581
4582 /*
4583 * Construct the error code masks based on what caused this task switch.
4584 * See Intel Instruction reference for INT.
4585 */
4586 uint16_t uExt;
4587 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4588 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4589 {
4590 uExt = 1;
4591 }
4592 else
4593 uExt = 0;
4594
4595 /*
4596 * Push any error code on to the new stack.
4597 */
4598 if (fFlags & IEM_XCPT_FLAGS_ERR)
4599 {
4600 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4601 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4602 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4603
4604 /* Check that there is sufficient space on the stack. */
4605 /** @todo Factor out segment limit checking for normal/expand down segments
4606 * into a separate function. */
4607 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4608 {
4609 if ( pCtx->esp - 1 > cbLimitSS
4610 || pCtx->esp < cbStackFrame)
4611 {
4612 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4613 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4614 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4615 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4616 }
4617 }
4618 else
4619 {
4620 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4621 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4622 {
4623 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4624 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4625 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4626 }
4627 }
4628
4629
4630 if (fIsNewTSS386)
4631 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4632 else
4633 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4634 if (rcStrict != VINF_SUCCESS)
4635 {
4636 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4637 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4638 return rcStrict;
4639 }
4640 }
4641
4642 /* Check the new EIP against the new CS limit. */
4643 if (pCtx->eip > pCtx->cs.u32Limit)
4644 {
4645 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4646 pCtx->eip, pCtx->cs.u32Limit));
4647 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4648 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4649 }
4650
4651 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4652 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4653}
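
/*
 * Illustrative sketch (not part of IEM): the protected-mode exception path dispatches
 * through a task gate roughly as below, after having validated the TSS selector and
 * descriptor itself as the remarks above require (SelTSS and DescTSS are hypothetical
 * locals of the caller):
 *
 *      rcStrict = iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, uNextEip,
 *                               fFlags, uErr, uCr2, SelTSS, &DescTSS);
 */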
4654
4655
4656/**
4657 * Implements exceptions and interrupts for protected mode.
4658 *
4659 * @returns VBox strict status code.
4660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4661 * @param pCtx The CPU context.
4662 * @param cbInstr The number of bytes to offset rIP by in the return
4663 * address.
4664 * @param u8Vector The interrupt / exception vector number.
4665 * @param fFlags The flags.
4666 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4667 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4668 */
4669IEM_STATIC VBOXSTRICTRC
4670iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4671 PCPUMCTX pCtx,
4672 uint8_t cbInstr,
4673 uint8_t u8Vector,
4674 uint32_t fFlags,
4675 uint16_t uErr,
4676 uint64_t uCr2)
4677{
4678 /*
4679 * Read the IDT entry.
4680 */
4681 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4682 {
4683 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4684 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4685 }
4686 X86DESC Idte;
4687 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4688 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4689 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4690 return rcStrict;
4691 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4692 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4693 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4694
4695 /*
4696 * Check the descriptor type, DPL and such.
4697 * ASSUMES this is done in the same order as described for call-gate calls.
4698 */
4699 if (Idte.Gate.u1DescType)
4700 {
4701 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4702 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4703 }
4704 bool fTaskGate = false;
4705 uint8_t f32BitGate = true;
4706 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4707 switch (Idte.Gate.u4Type)
4708 {
4709 case X86_SEL_TYPE_SYS_UNDEFINED:
4710 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4711 case X86_SEL_TYPE_SYS_LDT:
4712 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4713 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4714 case X86_SEL_TYPE_SYS_UNDEFINED2:
4715 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4716 case X86_SEL_TYPE_SYS_UNDEFINED3:
4717 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4718 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4719 case X86_SEL_TYPE_SYS_UNDEFINED4:
4720 {
4721 /** @todo check what actually happens when the type is wrong...
4722 * esp. call gates. */
4723 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4724 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4725 }
4726
4727 case X86_SEL_TYPE_SYS_286_INT_GATE:
4728 f32BitGate = false;
4729 RT_FALL_THRU();
4730 case X86_SEL_TYPE_SYS_386_INT_GATE:
4731 fEflToClear |= X86_EFL_IF;
4732 break;
4733
4734 case X86_SEL_TYPE_SYS_TASK_GATE:
4735 fTaskGate = true;
4736#ifndef IEM_IMPLEMENTS_TASKSWITCH
4737 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4738#endif
4739 break;
4740
4741 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4742 f32BitGate = false; RT_FALL_THRU();
4743 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4744 break;
4745
4746 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4747 }
4748
4749 /* Check DPL against CPL if applicable. */
4750 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4751 {
4752 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4753 {
4754 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4755 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4756 }
4757 }
4758
4759 /* Is it there? */
4760 if (!Idte.Gate.u1Present)
4761 {
4762 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4763 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4764 }
4765
4766 /* Is it a task-gate? */
4767 if (fTaskGate)
4768 {
4769 /*
4770 * Construct the error code masks based on what caused this task switch.
4771 * See Intel Instruction reference for INT.
4772 */
4773 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4774 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4775 RTSEL SelTSS = Idte.Gate.u16Sel;
4776
4777 /*
4778 * Fetch the TSS descriptor in the GDT.
4779 */
4780 IEMSELDESC DescTSS;
4781 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4782 if (rcStrict != VINF_SUCCESS)
4783 {
4784 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4785 VBOXSTRICTRC_VAL(rcStrict)));
4786 return rcStrict;
4787 }
4788
4789 /* The TSS descriptor must be a system segment and be available (not busy). */
4790 if ( DescTSS.Legacy.Gen.u1DescType
4791 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4792 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4793 {
4794 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4795 u8Vector, SelTSS, DescTSS.Legacy.au64));
4796 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4797 }
4798
4799 /* The TSS must be present. */
4800 if (!DescTSS.Legacy.Gen.u1Present)
4801 {
4802 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4803 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4804 }
4805
4806 /* Do the actual task switch. */
4807 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4808 }
4809
4810 /* A null CS is bad. */
4811 RTSEL NewCS = Idte.Gate.u16Sel;
4812 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4813 {
4814 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4815 return iemRaiseGeneralProtectionFault0(pVCpu);
4816 }
4817
4818 /* Fetch the descriptor for the new CS. */
4819 IEMSELDESC DescCS;
4820 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4821 if (rcStrict != VINF_SUCCESS)
4822 {
4823 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4824 return rcStrict;
4825 }
4826
4827 /* Must be a code segment. */
4828 if (!DescCS.Legacy.Gen.u1DescType)
4829 {
4830 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4831 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4832 }
4833 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4834 {
4835 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4836 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4837 }
4838
4839 /* Don't allow lowering the privilege level. */
4840 /** @todo Does the lowering of privileges apply to software interrupts
4841 * only? This has bearings on the more-privileged or
4842 * same-privilege stack behavior further down. A testcase would
4843 * be nice. */
4844 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4845 {
4846 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4847 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4848 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4849 }
4850
4851 /* Make sure the selector is present. */
4852 if (!DescCS.Legacy.Gen.u1Present)
4853 {
4854 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4855 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4856 }
4857
4858 /* Check the new EIP against the new CS limit. */
4859 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4860 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4861 ? Idte.Gate.u16OffsetLow
4862 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4863 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4864 if (uNewEip > cbLimitCS)
4865 {
4866 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4867 u8Vector, uNewEip, cbLimitCS, NewCS));
4868 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4869 }
4870 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4871
4872 /* Calc the flag image to push. */
4873 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4874 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4875 fEfl &= ~X86_EFL_RF;
4876 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4877 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4878
4879 /* From V8086 mode only go to CPL 0. */
4880 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4881 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4882 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4883 {
4884 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4885 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4886 }
4887
4888 /*
4889 * If the privilege level changes, we need to get a new stack from the TSS.
4890 * This in turn means validating the new SS and ESP...
4891 */
4892 if (uNewCpl != pVCpu->iem.s.uCpl)
4893 {
4894 RTSEL NewSS;
4895 uint32_t uNewEsp;
4896 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4897 if (rcStrict != VINF_SUCCESS)
4898 return rcStrict;
4899
4900 IEMSELDESC DescSS;
4901 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4902 if (rcStrict != VINF_SUCCESS)
4903 return rcStrict;
4904 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4905 if (!DescSS.Legacy.Gen.u1DefBig)
4906 {
4907 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4908 uNewEsp = (uint16_t)uNewEsp;
4909 }
4910
4911 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4912
4913 /* Check that there is sufficient space for the stack frame. */
4914 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4915 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4916 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4917 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4918
4919 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4920 {
4921 if ( uNewEsp - 1 > cbLimitSS
4922 || uNewEsp < cbStackFrame)
4923 {
4924 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4925 u8Vector, NewSS, uNewEsp, cbStackFrame));
4926 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4927 }
4928 }
4929 else
4930 {
4931 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4932 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4933 {
4934 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4935 u8Vector, NewSS, uNewEsp, cbStackFrame));
4936 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4937 }
4938 }
4939
4940 /*
4941 * Start making changes.
4942 */
4943
4944 /* Set the new CPL so that stack accesses use it. */
4945 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4946 pVCpu->iem.s.uCpl = uNewCpl;
4947
4948 /* Create the stack frame. */
4949 RTPTRUNION uStackFrame;
4950 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4951 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4952 if (rcStrict != VINF_SUCCESS)
4953 return rcStrict;
4954 void * const pvStackFrame = uStackFrame.pv;
4955 if (f32BitGate)
4956 {
4957 if (fFlags & IEM_XCPT_FLAGS_ERR)
4958 *uStackFrame.pu32++ = uErr;
4959 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4960 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4961 uStackFrame.pu32[2] = fEfl;
4962 uStackFrame.pu32[3] = pCtx->esp;
4963 uStackFrame.pu32[4] = pCtx->ss.Sel;
4964 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4965 if (fEfl & X86_EFL_VM)
4966 {
4967 uStackFrame.pu32[1] = pCtx->cs.Sel;
4968 uStackFrame.pu32[5] = pCtx->es.Sel;
4969 uStackFrame.pu32[6] = pCtx->ds.Sel;
4970 uStackFrame.pu32[7] = pCtx->fs.Sel;
4971 uStackFrame.pu32[8] = pCtx->gs.Sel;
4972 }
4973 }
4974 else
4975 {
4976 if (fFlags & IEM_XCPT_FLAGS_ERR)
4977 *uStackFrame.pu16++ = uErr;
4978 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4979 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4980 uStackFrame.pu16[2] = fEfl;
4981 uStackFrame.pu16[3] = pCtx->sp;
4982 uStackFrame.pu16[4] = pCtx->ss.Sel;
4983 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4984 if (fEfl & X86_EFL_VM)
4985 {
4986 uStackFrame.pu16[1] = pCtx->cs.Sel;
4987 uStackFrame.pu16[5] = pCtx->es.Sel;
4988 uStackFrame.pu16[6] = pCtx->ds.Sel;
4989 uStackFrame.pu16[7] = pCtx->fs.Sel;
4990 uStackFrame.pu16[8] = pCtx->gs.Sel;
4991 }
4992 }
4993 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4994 if (rcStrict != VINF_SUCCESS)
4995 return rcStrict;
4996
4997 /* Mark the selectors 'accessed' (hope this is the correct time). */
4998 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4999 * after pushing the stack frame? (Write protect the gdt + stack to
5000 * find out.) */
5001 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5002 {
5003 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5004 if (rcStrict != VINF_SUCCESS)
5005 return rcStrict;
5006 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5007 }
5008
5009 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5010 {
5011 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5012 if (rcStrict != VINF_SUCCESS)
5013 return rcStrict;
5014 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5015 }
5016
5017 /*
5018 * Start committing the register changes (joins with the DPL=CPL branch).
5019 */
5020 pCtx->ss.Sel = NewSS;
5021 pCtx->ss.ValidSel = NewSS;
5022 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5023 pCtx->ss.u32Limit = cbLimitSS;
5024 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5025 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5026 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5027 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5028 * SP is loaded).
5029 * Need to check the other combinations too:
5030 * - 16-bit TSS, 32-bit handler
5031 * - 32-bit TSS, 16-bit handler */
5032 if (!pCtx->ss.Attr.n.u1DefBig)
5033 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
5034 else
5035 pCtx->rsp = uNewEsp - cbStackFrame;
5036
5037 if (fEfl & X86_EFL_VM)
5038 {
5039 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
5040 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5041 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5042 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5043 }
5044 }
5045 /*
5046 * Same privilege, no stack change and smaller stack frame.
5047 */
5048 else
5049 {
5050 uint64_t uNewRsp;
5051 RTPTRUNION uStackFrame;
5052 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5053 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5054 if (rcStrict != VINF_SUCCESS)
5055 return rcStrict;
5056 void * const pvStackFrame = uStackFrame.pv;
5057
5058 if (f32BitGate)
5059 {
5060 if (fFlags & IEM_XCPT_FLAGS_ERR)
5061 *uStackFrame.pu32++ = uErr;
5062 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5063 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5064 uStackFrame.pu32[2] = fEfl;
5065 }
5066 else
5067 {
5068 if (fFlags & IEM_XCPT_FLAGS_ERR)
5069 *uStackFrame.pu16++ = uErr;
5070 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5071 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5072 uStackFrame.pu16[2] = fEfl;
5073 }
5074 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5075 if (rcStrict != VINF_SUCCESS)
5076 return rcStrict;
5077
5078 /* Mark the CS selector as 'accessed'. */
5079 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5080 {
5081 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5082 if (rcStrict != VINF_SUCCESS)
5083 return rcStrict;
5084 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5085 }
5086
5087 /*
5088 * Start committing the register changes (joins with the other branch).
5089 */
5090 pCtx->rsp = uNewRsp;
5091 }
5092
5093 /* ... register committing continues. */
5094 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5095 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5096 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5097 pCtx->cs.u32Limit = cbLimitCS;
5098 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5099 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5100
5101 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5102 fEfl &= ~fEflToClear;
5103 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5104
5105 if (fFlags & IEM_XCPT_FLAGS_CR2)
5106 pCtx->cr2 = uCr2;
5107
5108 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5109 iemRaiseXcptAdjustState(pCtx, u8Vector);
5110
5111 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5112}
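/*
 * Illustrative sketch, not part of the original source: the stack frame sizes used by
 * iemRaiseXcptOrIntInProtMode above when the privilege level changes.  The frame holds
 * (E)IP, CS, (E)FLAGS, (E)SP and SS, optionally preceded by an error code, and when
 * interrupting V86 code additionally ES, DS, FS and GS.  The helper name and the #if 0
 * guard are ours; it merely mirrors the "(err ? 12 : 10) << f32BitGate" style
 * expressions in the function.
 */
#if 0 /* example only, never compiled */
static unsigned iemExampleCalcProtModeXcptFrameSize(bool fErrCode, bool f32BitGate, bool fV86)
{
    /* 5 slots (IP, CS, FLAGS, SP, SS), +1 for an error code, +4 for V86 (ES, DS, FS, GS). */
    unsigned const cSlots = 5 + (fErrCode ? 1 : 0) + (fV86 ? 4 : 0);
    /* Each slot is 2 bytes for a 286 (16-bit) gate and 4 bytes for a 386 (32-bit) gate. */
    return (cSlots * 2) << (f32BitGate ? 1 : 0);
}
/* Example: 32-bit gate, error code, not V86: (6 * 2) << 1 = 24 bytes. */
#endif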
5113
5114
5115/**
5116 * Implements exceptions and interrupts for long mode.
5117 *
5118 * @returns VBox strict status code.
5119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5120 * @param pCtx The CPU context.
5121 * @param cbInstr The number of bytes to offset rIP by in the return
5122 * address.
5123 * @param u8Vector The interrupt / exception vector number.
5124 * @param fFlags The flags.
5125 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5126 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5127 */
5128IEM_STATIC VBOXSTRICTRC
5129iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5130 PCPUMCTX pCtx,
5131 uint8_t cbInstr,
5132 uint8_t u8Vector,
5133 uint32_t fFlags,
5134 uint16_t uErr,
5135 uint64_t uCr2)
5136{
5137 /*
5138 * Read the IDT entry.
5139 */
5140 uint16_t offIdt = (uint16_t)u8Vector << 4;
5141 if (pCtx->idtr.cbIdt < offIdt + 7)
5142 {
5143 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5144 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5145 }
5146 X86DESC64 Idte;
5147 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5148 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5149 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5150 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5151 return rcStrict;
5152 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5153 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5154 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5155
5156 /*
5157 * Check the descriptor type, DPL and such.
5158 * ASSUMES this is done in the same order as described for call-gate calls.
5159 */
5160 if (Idte.Gate.u1DescType)
5161 {
5162 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5163 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5164 }
5165 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5166 switch (Idte.Gate.u4Type)
5167 {
5168 case AMD64_SEL_TYPE_SYS_INT_GATE:
5169 fEflToClear |= X86_EFL_IF;
5170 break;
5171 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5172 break;
5173
5174 default:
5175 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5176 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5177 }
5178
5179 /* Check DPL against CPL if applicable. */
5180 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5181 {
5182 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5183 {
5184 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5185 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5186 }
5187 }
5188
5189 /* Is it there? */
5190 if (!Idte.Gate.u1Present)
5191 {
5192 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5193 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5194 }
5195
5196 /* A null CS is bad. */
5197 RTSEL NewCS = Idte.Gate.u16Sel;
5198 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5199 {
5200 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5201 return iemRaiseGeneralProtectionFault0(pVCpu);
5202 }
5203
5204 /* Fetch the descriptor for the new CS. */
5205 IEMSELDESC DescCS;
5206 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5207 if (rcStrict != VINF_SUCCESS)
5208 {
5209 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5210 return rcStrict;
5211 }
5212
5213 /* Must be a 64-bit code segment. */
5214 if (!DescCS.Long.Gen.u1DescType)
5215 {
5216 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5217 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5218 }
5219 if ( !DescCS.Long.Gen.u1Long
5220 || DescCS.Long.Gen.u1DefBig
5221 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5222 {
5223 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5224 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5225 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5226 }
5227
5228 /* Don't allow lowering the privilege level. For non-conforming CS
5229 selectors, the CS.DPL sets the privilege level the trap/interrupt
5230 handler runs at. For conforming CS selectors, the CPL remains
5231 unchanged, but the CS.DPL must be <= CPL. */
5232 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5233 * when CPU in Ring-0. Result \#GP? */
5234 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5235 {
5236 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5237 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5238 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5239 }
5240
5241
5242 /* Make sure the selector is present. */
5243 if (!DescCS.Legacy.Gen.u1Present)
5244 {
5245 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5246 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5247 }
5248
5249 /* Check that the new RIP is canonical. */
5250 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5251 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5252 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5253 if (!IEM_IS_CANONICAL(uNewRip))
5254 {
5255 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5256 return iemRaiseGeneralProtectionFault0(pVCpu);
5257 }
5258
5259 /*
5260 * If the privilege level changes or if the IST isn't zero, we need to get
5261 * a new stack from the TSS.
5262 */
5263 uint64_t uNewRsp;
5264 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5265 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5266 if ( uNewCpl != pVCpu->iem.s.uCpl
5267 || Idte.Gate.u3IST != 0)
5268 {
5269 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5270 if (rcStrict != VINF_SUCCESS)
5271 return rcStrict;
5272 }
5273 else
5274 uNewRsp = pCtx->rsp;
5275 uNewRsp &= ~(uint64_t)0xf;
5276
5277 /*
5278 * Calc the flag image to push.
5279 */
5280 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5281 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5282 fEfl &= ~X86_EFL_RF;
5283 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5284 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5285
5286 /*
5287 * Start making changes.
5288 */
5289 /* Set the new CPL so that stack accesses use it. */
5290 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5291 pVCpu->iem.s.uCpl = uNewCpl;
5292
5293 /* Create the stack frame. */
5294 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5295 RTPTRUNION uStackFrame;
5296 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5297 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5298 if (rcStrict != VINF_SUCCESS)
5299 return rcStrict;
5300 void * const pvStackFrame = uStackFrame.pv;
5301
5302 if (fFlags & IEM_XCPT_FLAGS_ERR)
5303 *uStackFrame.pu64++ = uErr;
5304 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5305 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5306 uStackFrame.pu64[2] = fEfl;
5307 uStackFrame.pu64[3] = pCtx->rsp;
5308 uStackFrame.pu64[4] = pCtx->ss.Sel;
5309 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5310 if (rcStrict != VINF_SUCCESS)
5311 return rcStrict;
5312
5313 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5314 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5315 * after pushing the stack frame? (Write protect the gdt + stack to
5316 * find out.) */
5317 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5318 {
5319 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5320 if (rcStrict != VINF_SUCCESS)
5321 return rcStrict;
5322 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5323 }
5324
5325 /*
5326 * Start committing the register changes.
5327 */
5328 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
5329 * hidden registers when interrupting 32-bit or 16-bit code! */
5330 if (uNewCpl != uOldCpl)
5331 {
5332 pCtx->ss.Sel = 0 | uNewCpl;
5333 pCtx->ss.ValidSel = 0 | uNewCpl;
5334 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5335 pCtx->ss.u32Limit = UINT32_MAX;
5336 pCtx->ss.u64Base = 0;
5337 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5338 }
5339 pCtx->rsp = uNewRsp - cbStackFrame;
5340 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5341 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5342 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5343 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5344 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5345 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5346 pCtx->rip = uNewRip;
5347
5348 fEfl &= ~fEflToClear;
5349 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5350
5351 if (fFlags & IEM_XCPT_FLAGS_CR2)
5352 pCtx->cr2 = uCr2;
5353
5354 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5355 iemRaiseXcptAdjustState(pCtx, u8Vector);
5356
5357 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5358}
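/*
 * Illustrative sketch, not part of the original source: how iemRaiseXcptOrIntInLongMode
 * above locates and decodes a 64-bit IDT gate.  Long mode gate descriptors are 16 bytes,
 * so the entry for vector N starts at idtr.pIdt + N * 16 (fetched in two 8-byte reads
 * above), and the handler RIP is split over three offset fields.  The struct and
 * function names below are ours, for illustration only.
 */
#if 0 /* example only, never compiled */
typedef struct EXAMPLELONGGATE
{
    uint16_t u16OffsetLow;      /* Target RIP bits 15:0. */
    uint16_t u16Sel;            /* Code segment selector. */
    uint16_t u16IstTypeDplP;    /* IST, type, DPL and present bits (not decoded here). */
    uint16_t u16OffsetHigh;     /* Target RIP bits 31:16. */
    uint32_t u32OffsetTop;      /* Target RIP bits 63:32. */
    uint32_t u32Reserved;
} EXAMPLELONGGATE;

static uint64_t iemExampleLongGateRip(EXAMPLELONGGATE const *pGate)
{
    return (uint64_t)pGate->u16OffsetLow
         | ((uint64_t)pGate->u16OffsetHigh << 16)
         | ((uint64_t)pGate->u32OffsetTop  << 32);
}
#endif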
5359
5360
5361/**
5362 * Implements exceptions and interrupts.
5363 *
5364 * All exceptions and interrupts go through this function!
5365 *
5366 * @returns VBox strict status code.
5367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5368 * @param cbInstr The number of bytes to offset rIP by in the return
5369 * address.
5370 * @param u8Vector The interrupt / exception vector number.
5371 * @param fFlags The flags.
5372 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5373 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5374 */
5375DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5376iemRaiseXcptOrInt(PVMCPU pVCpu,
5377 uint8_t cbInstr,
5378 uint8_t u8Vector,
5379 uint32_t fFlags,
5380 uint16_t uErr,
5381 uint64_t uCr2)
5382{
5383 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5384#ifdef IN_RING0
5385 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5386 AssertRCReturn(rc, rc);
5387#endif
5388
5389#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5390 /*
5391 * Flush prefetch buffer
5392 */
5393 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5394#endif
5395
5396 /*
5397 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5398 */
5399 if ( pCtx->eflags.Bits.u1VM
5400 && pCtx->eflags.Bits.u2IOPL != 3
5401 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5402 && (pCtx->cr0 & X86_CR0_PE) )
5403 {
5404 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5405 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5406 u8Vector = X86_XCPT_GP;
5407 uErr = 0;
5408 }
5409#ifdef DBGFTRACE_ENABLED
5410 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5411 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5412 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5413#endif
5414
5415#ifdef VBOX_WITH_NESTED_HWVIRT
5416 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
5417 {
5418 /*
5419 * If the event is being injected as part of VMRUN, it isn't subject to event
5420 * intercepts in the nested-guest. However, secondary exceptions that occur
5421 * during injection of any event -are- subject to exception intercepts.
5422 * See AMD spec. 15.20 "Event Injection".
5423 */
5424 if (!pCtx->hwvirt.svm.fInterceptEvents)
5425 pCtx->hwvirt.svm.fInterceptEvents = 1;
5426 else
5427 {
5428 /*
5429 * Check and handle if the event being raised is intercepted.
5430 */
5431 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5432 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5433 return rcStrict0;
5434 }
5435 }
5436#endif /* VBOX_WITH_NESTED_HWVIRT */
5437
5438 /*
5439 * Do recursion accounting.
5440 */
5441 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5442 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5443 if (pVCpu->iem.s.cXcptRecursions == 0)
5444 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5445 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5446 else
5447 {
5448 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5449 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5450 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5451
5452 if (pVCpu->iem.s.cXcptRecursions >= 3)
5453 {
5454#ifdef DEBUG_bird
5455 AssertFailed();
5456#endif
5457 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5458 }
5459
5460 /*
5461 * Evaluate the sequence of recurring events.
5462 */
5463 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5464 NULL /* pXcptRaiseInfo */);
5465 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5466 { /* likely */ }
5467 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5468 {
5469 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5470 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5471 u8Vector = X86_XCPT_DF;
5472 uErr = 0;
5473 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5474 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5475 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + X86_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5476 }
5477 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5478 {
5479 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5480 return iemInitiateCpuShutdown(pVCpu);
5481 }
5482 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5483 {
5484 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5485 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5486 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5487 return VERR_EM_GUEST_CPU_HANG;
5488 }
5489 else
5490 {
5491 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5492 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5493 return VERR_IEM_IPE_9;
5494 }
5495
5496 /*
5497 * The 'EXT' bit is set when an exception occurs during delivery of an external
5498 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5499 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5500 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5501 *
5502 * [1] - Intel spec. 6.13 "Error Code"
5503 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5504 * [3] - Intel Instruction reference for INT n.
5505 */
5506 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5507 && (fFlags & IEM_XCPT_FLAGS_ERR)
5508 && u8Vector != X86_XCPT_PF
5509 && u8Vector != X86_XCPT_DF)
5510 {
5511 uErr |= X86_TRAP_ERR_EXTERNAL;
5512 }
5513 }
5514
5515 pVCpu->iem.s.cXcptRecursions++;
5516 pVCpu->iem.s.uCurXcpt = u8Vector;
5517 pVCpu->iem.s.fCurXcpt = fFlags;
5518 pVCpu->iem.s.uCurXcptErr = uErr;
5519 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5520
5521 /*
5522 * Extensive logging.
5523 */
5524#if defined(LOG_ENABLED) && defined(IN_RING3)
5525 if (LogIs3Enabled())
5526 {
5527 PVM pVM = pVCpu->CTX_SUFF(pVM);
5528 char szRegs[4096];
5529 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5530 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5531 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5532 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5533 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5534 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5535 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5536 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5537 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5538 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5539 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5540 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5541 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5542 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5543 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5544 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5545 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5546 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5547 " efer=%016VR{efer}\n"
5548 " pat=%016VR{pat}\n"
5549 " sf_mask=%016VR{sf_mask}\n"
5550 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5551 " lstar=%016VR{lstar}\n"
5552 " star=%016VR{star} cstar=%016VR{cstar}\n"
5553 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5554 );
5555
5556 char szInstr[256];
5557 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5558 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5559 szInstr, sizeof(szInstr), NULL);
5560 Log3(("%s%s\n", szRegs, szInstr));
5561 }
5562#endif /* LOG_ENABLED */
5563
5564 /*
5565 * Call the mode specific worker function.
5566 */
5567 VBOXSTRICTRC rcStrict;
5568 if (!(pCtx->cr0 & X86_CR0_PE))
5569 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5570 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5571 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5572 else
5573 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5574
5575 /* Flush the prefetch buffer. */
5576#ifdef IEM_WITH_CODE_TLB
5577 pVCpu->iem.s.pbInstrBuf = NULL;
5578#else
5579 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5580#endif
5581
5582 /*
5583 * Unwind.
5584 */
5585 pVCpu->iem.s.cXcptRecursions--;
5586 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5587 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5588 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5589 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5590 return rcStrict;
5591}
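/*
 * Illustrative sketch, not part of the original source: the error code EXT bit
 * convention described in iemRaiseXcptOrInt above.  EXT is set when the new error code
 * belongs to an exception raised while delivering an external event or a prior
 * exception, and is left clear for plain software INT n / INT3 / INTO.  The helper name
 * is ours, and the #PF/#DF vector exclusions applied above are omitted for brevity.
 */
#if 0 /* example only, never compiled */
static uint16_t iemExampleXcptErrExtBit(uint32_t fPrevXcpt, uint32_t fCurFlags)
{
    if (   (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
        && (fCurFlags & IEM_XCPT_FLAGS_ERR))
        return X86_TRAP_ERR_EXTERNAL;
    return 0;
}
#endif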
5592
5593#ifdef IEM_WITH_SETJMP
5594/**
5595 * See iemRaiseXcptOrInt. Will not return.
5596 */
5597IEM_STATIC DECL_NO_RETURN(void)
5598iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5599 uint8_t cbInstr,
5600 uint8_t u8Vector,
5601 uint32_t fFlags,
5602 uint16_t uErr,
5603 uint64_t uCr2)
5604{
5605 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5606 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5607}
5608#endif
5609
5610
5611/** \#DE - 00. */
5612DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5613{
5614 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5615}
5616
5617
5618/** \#DB - 01.
5619 * @note This automatically clears DR7.GD. */
5620DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5621{
5622 /** @todo set/clear RF. */
5623 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5624 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5625}
5626
5627
5628/** \#BR - 05. */
5629DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5630{
5631 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5632}
5633
5634
5635/** \#UD - 06. */
5636DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5637{
5638 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5639}
5640
5641
5642/** \#NM - 07. */
5643DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5644{
5645 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5646}
5647
5648
5649/** \#TS(err) - 0a. */
5650DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5651{
5652 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5653}
5654
5655
5656/** \#TS(tr) - 0a. */
5657DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5658{
5659 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5660 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5661}
5662
5663
5664/** \#TS(0) - 0a. */
5665DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5666{
5667 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5668 0, 0);
5669}
5670
5671
5672/** \#TS(err) - 0a. */
5673DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5674{
5675 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5676 uSel & X86_SEL_MASK_OFF_RPL, 0);
5677}
5678
5679
5680/** \#NP(err) - 0b. */
5681DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5682{
5683 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5684}
5685
5686
5687/** \#NP(sel) - 0b. */
5688DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5689{
5690 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5691 uSel & ~X86_SEL_RPL, 0);
5692}
5693
5694
5695/** \#SS(seg) - 0c. */
5696DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5697{
5698 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5699 uSel & ~X86_SEL_RPL, 0);
5700}
5701
5702
5703/** \#SS(err) - 0c. */
5704DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5705{
5706 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5707}
5708
5709
5710/** \#GP(n) - 0d. */
5711DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5712{
5713 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5714}
5715
5716
5717/** \#GP(0) - 0d. */
5718DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5719{
5720 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5721}
5722
5723#ifdef IEM_WITH_SETJMP
5724/** \#GP(0) - 0d. */
5725DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5726{
5727 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5728}
5729#endif
5730
5731
5732/** \#GP(sel) - 0d. */
5733DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5734{
5735 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5736 Sel & ~X86_SEL_RPL, 0);
5737}
5738
5739
5740/** \#GP(0) - 0d. */
5741DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5742{
5743 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5744}
5745
5746
5747/** \#GP(sel) - 0d. */
5748DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5749{
5750 NOREF(iSegReg); NOREF(fAccess);
5751 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5752 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5753}
5754
5755#ifdef IEM_WITH_SETJMP
5756/** \#GP(sel) - 0d, longjmp. */
5757DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5758{
5759 NOREF(iSegReg); NOREF(fAccess);
5760 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5761 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5762}
5763#endif
5764
5765/** \#GP(sel) - 0d. */
5766DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5767{
5768 NOREF(Sel);
5769 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5770}
5771
5772#ifdef IEM_WITH_SETJMP
5773/** \#GP(sel) - 0d, longjmp. */
5774DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5775{
5776 NOREF(Sel);
5777 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5778}
5779#endif
5780
5781
5782/** \#GP(sel) - 0d. */
5783DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5784{
5785 NOREF(iSegReg); NOREF(fAccess);
5786 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5787}
5788
5789#ifdef IEM_WITH_SETJMP
5790/** \#GP(sel) - 0d, longjmp. */
5791DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5792 uint32_t fAccess)
5793{
5794 NOREF(iSegReg); NOREF(fAccess);
5795 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5796}
5797#endif
5798
5799
5800/** \#PF(n) - 0e. */
5801DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5802{
5803 uint16_t uErr;
5804 switch (rc)
5805 {
5806 case VERR_PAGE_NOT_PRESENT:
5807 case VERR_PAGE_TABLE_NOT_PRESENT:
5808 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5809 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5810 uErr = 0;
5811 break;
5812
5813 default:
5814 AssertMsgFailed(("%Rrc\n", rc));
5815 RT_FALL_THRU();
5816 case VERR_ACCESS_DENIED:
5817 uErr = X86_TRAP_PF_P;
5818 break;
5819
5820 /** @todo reserved */
5821 }
5822
5823 if (pVCpu->iem.s.uCpl == 3)
5824 uErr |= X86_TRAP_PF_US;
5825
5826 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5827 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5828 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5829 uErr |= X86_TRAP_PF_ID;
5830
5831#if 0 /* This is so much non-sense, really. Why was it done like that? */
5832 /* Note! RW access callers reporting a WRITE protection fault will clear
5833 the READ flag before calling. So, read-modify-write accesses (RW)
5834 can safely be reported as READ faults. */
5835 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5836 uErr |= X86_TRAP_PF_RW;
5837#else
5838 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5839 {
5840 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5841 uErr |= X86_TRAP_PF_RW;
5842 }
5843#endif
5844
5845 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5846 uErr, GCPtrWhere);
5847}
5848
5849#ifdef IEM_WITH_SETJMP
5850/** \#PF(n) - 0e, longjmp. */
5851IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5852{
5853 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5854}
5855#endif
5856
5857
5858/** \#MF(0) - 10. */
5859DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5860{
5861 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5862}
5863
5864
5865/** \#AC(0) - 11. */
5866DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5867{
5868 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5869}
5870
5871
5872/**
5873 * Macro for calling iemCImplRaiseDivideError().
5874 *
5875 * This enables us to add/remove arguments and force different levels of
5876 * inlining as we wish.
5877 *
5878 * @return Strict VBox status code.
5879 */
5880#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5881IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5882{
5883 NOREF(cbInstr);
5884 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5885}
5886
5887
5888/**
5889 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5890 *
5891 * This enables us to add/remove arguments and force different levels of
5892 * inlining as we wish.
5893 *
5894 * @return Strict VBox status code.
5895 */
5896#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5897IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5898{
5899 NOREF(cbInstr);
5900 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5901}
5902
5903
5904/**
5905 * Macro for calling iemCImplRaiseInvalidOpcode().
5906 *
5907 * This enables us to add/remove arguments and force different levels of
5908 * inlining as we wish.
5909 *
5910 * @return Strict VBox status code.
5911 */
5912#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5913IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5914{
5915 NOREF(cbInstr);
5916 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5917}
5918
5919
5920/** @} */
5921
5922
5923/*
5924 *
5925 * Helper routines.
5926 * Helper routines.
5927 * Helper routines.
5928 *
5929 */
5930
5931/**
5932 * Recalculates the effective operand size.
5933 *
5934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5935 */
5936IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5937{
5938 switch (pVCpu->iem.s.enmCpuMode)
5939 {
5940 case IEMMODE_16BIT:
5941 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5942 break;
5943 case IEMMODE_32BIT:
5944 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5945 break;
5946 case IEMMODE_64BIT:
5947 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5948 {
5949 case 0:
5950 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5951 break;
5952 case IEM_OP_PRF_SIZE_OP:
5953 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5954 break;
5955 case IEM_OP_PRF_SIZE_REX_W:
5956 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5957 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5958 break;
5959 }
5960 break;
5961 default:
5962 AssertFailed();
5963 }
5964}
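/*
 * Worked example, not part of the original source: the 64-bit mode outcomes of
 * iemRecalEffOpSize above.  The operand size override prefix (66h) selects 16-bit
 * operands, REX.W selects 64-bit operands and takes precedence over 66h, and with
 * neither prefix the default operand size (normally 32-bit) applies.
 *
 *   fPrefixes                                    -> enmEffOpSize
 *   0                                            -> enmDefOpSize (usually IEMMODE_32BIT)
 *   IEM_OP_PRF_SIZE_OP                           -> IEMMODE_16BIT
 *   IEM_OP_PRF_SIZE_REX_W                        -> IEMMODE_64BIT
 *   IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP   -> IEMMODE_64BIT
 */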
5965
5966
5967/**
5968 * Sets the default operand size to 64-bit and recalculates the effective
5969 * operand size.
5970 *
5971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5972 */
5973IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5974{
5975 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5976 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5977 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5978 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5979 else
5980 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5981}
5982
5983
5984/*
5985 *
5986 * Common opcode decoders.
5987 * Common opcode decoders.
5988 * Common opcode decoders.
5989 *
5990 */
5991//#include <iprt/mem.h>
5992
5993/**
5994 * Used to add extra details about a stub case.
5995 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5996 */
5997IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5998{
5999#if defined(LOG_ENABLED) && defined(IN_RING3)
6000 PVM pVM = pVCpu->CTX_SUFF(pVM);
6001 char szRegs[4096];
6002 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6003 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6004 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6005 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6006 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6007 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6008 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6009 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6010 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6011 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6012 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6013 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6014 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6015 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6016 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6017 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6018 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6019 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6020 " efer=%016VR{efer}\n"
6021 " pat=%016VR{pat}\n"
6022 " sf_mask=%016VR{sf_mask}\n"
6023 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6024 " lstar=%016VR{lstar}\n"
6025 " star=%016VR{star} cstar=%016VR{cstar}\n"
6026 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6027 );
6028
6029 char szInstr[256];
6030 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6031 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6032 szInstr, sizeof(szInstr), NULL);
6033
6034 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6035#else
6036 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
6037#endif
6038}
6039
6040/**
6041 * Complains about a stub.
6042 *
6043 * Providing two versions of this macro, one for daily use and one for use when
6044 * working on IEM.
6045 */
6046#if 0
6047# define IEMOP_BITCH_ABOUT_STUB() \
6048 do { \
6049 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6050 iemOpStubMsg2(pVCpu); \
6051 RTAssertPanic(); \
6052 } while (0)
6053#else
6054# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6055#endif
6056
6057/** Stubs an opcode. */
6058#define FNIEMOP_STUB(a_Name) \
6059 FNIEMOP_DEF(a_Name) \
6060 { \
6061 RT_NOREF_PV(pVCpu); \
6062 IEMOP_BITCH_ABOUT_STUB(); \
6063 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6064 } \
6065 typedef int ignore_semicolon
6066
6067/** Stubs an opcode. */
6068#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6069 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6070 { \
6071 RT_NOREF_PV(pVCpu); \
6072 RT_NOREF_PV(a_Name0); \
6073 IEMOP_BITCH_ABOUT_STUB(); \
6074 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6075 } \
6076 typedef int ignore_semicolon
6077
6078/** Stubs an opcode which currently should raise \#UD. */
6079#define FNIEMOP_UD_STUB(a_Name) \
6080 FNIEMOP_DEF(a_Name) \
6081 { \
6082 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6083 return IEMOP_RAISE_INVALID_OPCODE(); \
6084 } \
6085 typedef int ignore_semicolon
6086
6087/** Stubs an opcode which currently should raise \#UD. */
6088#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6089 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6090 { \
6091 RT_NOREF_PV(pVCpu); \
6092 RT_NOREF_PV(a_Name0); \
6093 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6094 return IEMOP_RAISE_INVALID_OPCODE(); \
6095 } \
6096 typedef int ignore_semicolon
6097
6098
6099
6100/** @name Register Access.
6101 * @{
6102 */
6103
6104/**
6105 * Gets a reference (pointer) to the specified hidden segment register.
6106 *
6107 * @returns Hidden register reference.
6108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6109 * @param iSegReg The segment register.
6110 */
6111IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6112{
6113 Assert(iSegReg < X86_SREG_COUNT);
6114 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6115 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6116
6117#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6118 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6119 { /* likely */ }
6120 else
6121 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6122#else
6123 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6124#endif
6125 return pSReg;
6126}
6127
6128
6129/**
6130 * Ensures that the given hidden segment register is up to date.
6131 *
6132 * @returns Hidden register reference.
6133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6134 * @param pSReg The segment register.
6135 */
6136IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6137{
6138#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6139 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6140 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6141#else
6142 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6143 NOREF(pVCpu);
6144#endif
6145 return pSReg;
6146}
6147
6148
6149/**
6150 * Gets a reference (pointer) to the specified segment register (the selector
6151 * value).
6152 *
6153 * @returns Pointer to the selector variable.
6154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6155 * @param iSegReg The segment register.
6156 */
6157DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6158{
6159 Assert(iSegReg < X86_SREG_COUNT);
6160 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6161 return &pCtx->aSRegs[iSegReg].Sel;
6162}
6163
6164
6165/**
6166 * Fetches the selector value of a segment register.
6167 *
6168 * @returns The selector value.
6169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6170 * @param iSegReg The segment register.
6171 */
6172DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6173{
6174 Assert(iSegReg < X86_SREG_COUNT);
6175 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6176}
6177
6178
6179/**
6180 * Fetches the base address value of a segment register.
6181 *
6182 * @returns The base address value.
6183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6184 * @param iSegReg The segment register.
6185 */
6186DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6187{
6188 Assert(iSegReg < X86_SREG_COUNT);
6189 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].u64Base;
6190}
6191
6192
6193/**
6194 * Gets a reference (pointer) to the specified general purpose register.
6195 *
6196 * @returns Register reference.
6197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6198 * @param iReg The general purpose register.
6199 */
6200DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6201{
6202 Assert(iReg < 16);
6203 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6204 return &pCtx->aGRegs[iReg];
6205}
6206
6207
6208/**
6209 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6210 *
6211 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6212 *
6213 * @returns Register reference.
6214 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6215 * @param iReg The register.
6216 */
6217DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6218{
6219 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6220 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6221 {
6222 Assert(iReg < 16);
6223 return &pCtx->aGRegs[iReg].u8;
6224 }
6225 /* high 8-bit register. */
6226 Assert(iReg < 8);
6227 return &pCtx->aGRegs[iReg & 3].bHi;
6228}
6229
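/*
 * Illustrative sketch (not built, hence the #if 0): the 8-bit register mapping
 * iemGRegRefU8 implements, written out as lookup tables. Without a REX prefix,
 * encodings 4-7 select the legacy high-byte registers AH/CH/DH/BH; with any
 * REX prefix they select SPL/BPL/SIL/DIL instead. The table names below are
 * made up for this sketch only.
 */
#if 0
static const char * const g_apszGReg8NoRex[8] =
{ "AL", "CL", "DL", "BL", "AH", "CH", "DH", "BH" };
static const char * const g_apszGReg8WithRex[16] =
{ "AL", "CL", "DL", "BL", "SPL", "BPL", "SIL", "DIL",
  "R8B", "R9B", "R10B", "R11B", "R12B", "R13B", "R14B", "R15B" };
#endif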
6230
6231/**
6232 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6233 *
6234 * @returns Register reference.
6235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6236 * @param iReg The register.
6237 */
6238DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6239{
6240 Assert(iReg < 16);
6241 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6242 return &pCtx->aGRegs[iReg].u16;
6243}
6244
6245
6246/**
6247 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6248 *
6249 * @returns Register reference.
6250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6251 * @param iReg The register.
6252 */
6253DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6254{
6255 Assert(iReg < 16);
6256 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6257 return &pCtx->aGRegs[iReg].u32;
6258}
6259
6260
6261/**
6262 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6263 *
6264 * @returns Register reference.
6265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6266 * @param iReg The register.
6267 */
6268DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6269{
6270 Assert(iReg < 16);
6271 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6272 return &pCtx->aGRegs[iReg].u64;
6273}
6274
6275
6276/**
6277 * Gets a reference (pointer) to the specified segment register's base address.
6278 *
6279 * @returns Segment register base address reference.
6280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6281 * @param iSegReg The segment selector.
6282 */
6283DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6284{
6285 Assert(iSegReg < X86_SREG_COUNT);
6286 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6287 return &pCtx->aSRegs[iSegReg].u64Base;
6288}
6289
6290
6291/**
6292 * Fetches the value of an 8-bit general purpose register.
6293 *
6294 * @returns The register value.
6295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6296 * @param iReg The register.
6297 */
6298DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6299{
6300 return *iemGRegRefU8(pVCpu, iReg);
6301}
6302
6303
6304/**
6305 * Fetches the value of a 16-bit general purpose register.
6306 *
6307 * @returns The register value.
6308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6309 * @param iReg The register.
6310 */
6311DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6312{
6313 Assert(iReg < 16);
6314 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6315}
6316
6317
6318/**
6319 * Fetches the value of a 32-bit general purpose register.
6320 *
6321 * @returns The register value.
6322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6323 * @param iReg The register.
6324 */
6325DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6326{
6327 Assert(iReg < 16);
6328 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6329}
6330
6331
6332/**
6333 * Fetches the value of a 64-bit general purpose register.
6334 *
6335 * @returns The register value.
6336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6337 * @param iReg The register.
6338 */
6339DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6340{
6341 Assert(iReg < 16);
6342 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6343}
6344
6345
6346/**
6347 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6348 *
6349 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6350 * segment limit.
6351 *
6352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6353 * @param offNextInstr The offset of the next instruction.
6354 */
6355IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6356{
6357 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6358 switch (pVCpu->iem.s.enmEffOpSize)
6359 {
6360 case IEMMODE_16BIT:
6361 {
6362 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6363 if ( uNewIp > pCtx->cs.u32Limit
6364 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6365 return iemRaiseGeneralProtectionFault0(pVCpu);
6366 pCtx->rip = uNewIp;
6367 break;
6368 }
6369
6370 case IEMMODE_32BIT:
6371 {
6372 Assert(pCtx->rip <= UINT32_MAX);
6373 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6374
6375 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6376 if (uNewEip > pCtx->cs.u32Limit)
6377 return iemRaiseGeneralProtectionFault0(pVCpu);
6378 pCtx->rip = uNewEip;
6379 break;
6380 }
6381
6382 case IEMMODE_64BIT:
6383 {
6384 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6385
6386 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6387 if (!IEM_IS_CANONICAL(uNewRip))
6388 return iemRaiseGeneralProtectionFault0(pVCpu);
6389 pCtx->rip = uNewRip;
6390 break;
6391 }
6392
6393 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6394 }
6395
6396 pCtx->eflags.Bits.u1RF = 0;
6397
6398#ifndef IEM_WITH_CODE_TLB
6399 /* Flush the prefetch buffer. */
6400 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6401#endif
6402
6403 return VINF_SUCCESS;
6404}
6405
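/*
 * Illustrative sketch (not built): the canonical-address test the jump helpers
 * rely on. On current 48-bit implementations an address is canonical when bits
 * 63:48 are a sign extension of bit 47; IEM_IS_CANONICAL is assumed to check
 * something equivalent to this.
 */
#if 0
static bool iemIsCanonicalSketch(uint64_t uAddr)
{
    return (uint64_t)((int64_t)(uAddr << 16) >> 16) == uAddr;
}
#endif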
6406
6407/**
6408 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6409 *
6410 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6411 * segment limit.
6412 *
6413 * @returns Strict VBox status code.
6414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6415 * @param offNextInstr The offset of the next instruction.
6416 */
6417IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6418{
6419 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6420 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6421
6422 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6423 if ( uNewIp > pCtx->cs.u32Limit
6424 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6425 return iemRaiseGeneralProtectionFault0(pVCpu);
6426 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6427 pCtx->rip = uNewIp;
6428 pCtx->eflags.Bits.u1RF = 0;
6429
6430#ifndef IEM_WITH_CODE_TLB
6431 /* Flush the prefetch buffer. */
6432 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6433#endif
6434
6435 return VINF_SUCCESS;
6436}
6437
6438
6439/**
6440 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6441 *
6442 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6443 * segment limit.
6444 *
6445 * @returns Strict VBox status code.
6446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6447 * @param offNextInstr The offset of the next instruction.
6448 */
6449IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6450{
6451 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6452 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6453
6454 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6455 {
6456 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6457
6458 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6459 if (uNewEip > pCtx->cs.u32Limit)
6460 return iemRaiseGeneralProtectionFault0(pVCpu);
6461 pCtx->rip = uNewEip;
6462 }
6463 else
6464 {
6465 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6466
6467 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6468 if (!IEM_IS_CANONICAL(uNewRip))
6469 return iemRaiseGeneralProtectionFault0(pVCpu);
6470 pCtx->rip = uNewRip;
6471 }
6472 pCtx->eflags.Bits.u1RF = 0;
6473
6474#ifndef IEM_WITH_CODE_TLB
6475 /* Flush the prefetch buffer. */
6476 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6477#endif
6478
6479 return VINF_SUCCESS;
6480}
6481
6482
6483/**
6484 * Performs a near jump to the specified address.
6485 *
6486 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6487 * segment limit.
6488 *
6489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6490 * @param uNewRip The new RIP value.
6491 */
6492IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6493{
6494 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6495 switch (pVCpu->iem.s.enmEffOpSize)
6496 {
6497 case IEMMODE_16BIT:
6498 {
6499 Assert(uNewRip <= UINT16_MAX);
6500 if ( uNewRip > pCtx->cs.u32Limit
6501 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6502 return iemRaiseGeneralProtectionFault0(pVCpu);
6503 /** @todo Test 16-bit jump in 64-bit mode. */
6504 pCtx->rip = uNewRip;
6505 break;
6506 }
6507
6508 case IEMMODE_32BIT:
6509 {
6510 Assert(uNewRip <= UINT32_MAX);
6511 Assert(pCtx->rip <= UINT32_MAX);
6512 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6513
6514 if (uNewRip > pCtx->cs.u32Limit)
6515 return iemRaiseGeneralProtectionFault0(pVCpu);
6516 pCtx->rip = uNewRip;
6517 break;
6518 }
6519
6520 case IEMMODE_64BIT:
6521 {
6522 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6523
6524 if (!IEM_IS_CANONICAL(uNewRip))
6525 return iemRaiseGeneralProtectionFault0(pVCpu);
6526 pCtx->rip = uNewRip;
6527 break;
6528 }
6529
6530 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6531 }
6532
6533 pCtx->eflags.Bits.u1RF = 0;
6534
6535#ifndef IEM_WITH_CODE_TLB
6536 /* Flush the prefetch buffer. */
6537 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6538#endif
6539
6540 return VINF_SUCCESS;
6541}
6542
6543
6544/**
6545 * Get the address of the top of the stack.
6546 *
6547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6548 * @param pCtx The CPU context from which SP/ESP/RSP should be
6549 * read.
6550 */
6551DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6552{
6553 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6554 return pCtx->rsp;
6555 if (pCtx->ss.Attr.n.u1DefBig)
6556 return pCtx->esp;
6557 return pCtx->sp;
6558}
6559
6560
6561/**
6562 * Updates the RIP/EIP/IP to point to the next instruction.
6563 *
6564 * This function leaves the EFLAGS.RF flag alone.
6565 *
6566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6567 * @param cbInstr The number of bytes to add.
6568 */
6569IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6570{
6571 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6572 switch (pVCpu->iem.s.enmCpuMode)
6573 {
6574 case IEMMODE_16BIT:
6575 Assert(pCtx->rip <= UINT16_MAX);
6576 pCtx->eip += cbInstr;
6577 pCtx->eip &= UINT32_C(0xffff);
6578 break;
6579
6580 case IEMMODE_32BIT:
6581 pCtx->eip += cbInstr;
6582 Assert(pCtx->rip <= UINT32_MAX);
6583 break;
6584
6585 case IEMMODE_64BIT:
6586 pCtx->rip += cbInstr;
6587 break;
6588 default: AssertFailed();
6589 }
6590}
6591
6592
6593#if 0
6594/**
6595 * Updates the RIP/EIP/IP to point to the next instruction.
6596 *
6597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6598 */
6599IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6600{
6601 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6602}
6603#endif
6604
6605
6606
6607/**
6608 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6609 *
6610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6611 * @param cbInstr The number of bytes to add.
6612 */
6613IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6614{
6615 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6616
6617 pCtx->eflags.Bits.u1RF = 0;
6618
6619 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6620#if ARCH_BITS >= 64
6621 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6622 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6623 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6624#else
6625 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6626 pCtx->rip += cbInstr;
6627 else
6628 pCtx->eip += cbInstr;
6629#endif
6630}
6631
6632
6633/**
6634 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6635 *
6636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6637 */
6638IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6639{
6640 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6641}
6642
6643
6644/**
6645 * Adds to the stack pointer.
6646 *
6647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6648 * @param pCtx The CPU context in which SP/ESP/RSP should be
6649 * updated.
6650 * @param cbToAdd The number of bytes to add (8-bit!).
6651 */
6652DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6653{
6654 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6655 pCtx->rsp += cbToAdd;
6656 else if (pCtx->ss.Attr.n.u1DefBig)
6657 pCtx->esp += cbToAdd;
6658 else
6659 pCtx->sp += cbToAdd;
6660}
6661
6662
6663/**
6664 * Subtracts from the stack pointer.
6665 *
6666 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6667 * @param pCtx The CPU context in which SP/ESP/RSP should be
6668 * updated.
6669 * @param cbToSub The number of bytes to subtract (8-bit!).
6670 */
6671DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6672{
6673 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6674 pCtx->rsp -= cbToSub;
6675 else if (pCtx->ss.Attr.n.u1DefBig)
6676 pCtx->esp -= cbToSub;
6677 else
6678 pCtx->sp -= cbToSub;
6679}
6680
6681
6682/**
6683 * Adds to the temporary stack pointer.
6684 *
6685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6686 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6687 * @param cbToAdd The number of bytes to add (16-bit).
6688 * @param pCtx Where to get the current stack mode.
6689 */
6690DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6691{
6692 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6693 pTmpRsp->u += cbToAdd;
6694 else if (pCtx->ss.Attr.n.u1DefBig)
6695 pTmpRsp->DWords.dw0 += cbToAdd;
6696 else
6697 pTmpRsp->Words.w0 += cbToAdd;
6698}
6699
6700
6701/**
6702 * Subtracts from the temporary stack pointer.
6703 *
6704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6705 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6706 * @param cbToSub The number of bytes to subtract.
6707 * @param pCtx Where to get the current stack mode.
6708 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6709 * expecting that.
6710 */
6711DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6712{
6713 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6714 pTmpRsp->u -= cbToSub;
6715 else if (pCtx->ss.Attr.n.u1DefBig)
6716 pTmpRsp->DWords.dw0 -= cbToSub;
6717 else
6718 pTmpRsp->Words.w0 -= cbToSub;
6719}
6720
6721
6722/**
6723 * Calculates the effective stack address for a push of the specified size as
6724 * well as the new RSP value (upper bits may be masked).
6725 *
6726 * @returns Effective stack address for the push.
6727 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6728 * @param pCtx Where to get the current stack mode.
6729 * @param cbItem The size of the stack item to push.
6730 * @param puNewRsp Where to return the new RSP value.
6731 */
6732DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6733{
6734 RTUINT64U uTmpRsp;
6735 RTGCPTR GCPtrTop;
6736 uTmpRsp.u = pCtx->rsp;
6737
6738 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6739 GCPtrTop = uTmpRsp.u -= cbItem;
6740 else if (pCtx->ss.Attr.n.u1DefBig)
6741 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6742 else
6743 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6744 *puNewRsp = uTmpRsp.u;
6745 return GCPtrTop;
6746}
6747
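/*
 * Illustrative sketch (not built): the push helpers above do their arithmetic
 * in the width selected by the current mode (RSP, ESP when SS.D=1, otherwise
 * SP), so wrap-around stays confined to that width. Example, assuming a
 * 16-bit stack segment: pushing a word with SP=0 wraps SP to 0xfffe while the
 * upper bits of RSP are left untouched.
 */
#if 0
static void iemRspPushWrapExample(void)
{
    RTUINT64U uRsp;
    uRsp.u = UINT64_C(0x0000123400000000);  /* SP = 0 */
    uRsp.Words.w0 -= 2;                     /* push of a word */
    Assert(uRsp.u == UINT64_C(0x000012340000fffe));
}
#endif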
6748
6749/**
6750 * Gets the current stack pointer and calculates the value after a pop of the
6751 * specified size.
6752 *
6753 * @returns Current stack pointer.
6754 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6755 * @param pCtx Where to get the current stack mode.
6756 * @param cbItem The size of the stack item to pop.
6757 * @param puNewRsp Where to return the new RSP value.
6758 */
6759DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6760{
6761 RTUINT64U uTmpRsp;
6762 RTGCPTR GCPtrTop;
6763 uTmpRsp.u = pCtx->rsp;
6764
6765 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6766 {
6767 GCPtrTop = uTmpRsp.u;
6768 uTmpRsp.u += cbItem;
6769 }
6770 else if (pCtx->ss.Attr.n.u1DefBig)
6771 {
6772 GCPtrTop = uTmpRsp.DWords.dw0;
6773 uTmpRsp.DWords.dw0 += cbItem;
6774 }
6775 else
6776 {
6777 GCPtrTop = uTmpRsp.Words.w0;
6778 uTmpRsp.Words.w0 += cbItem;
6779 }
6780 *puNewRsp = uTmpRsp.u;
6781 return GCPtrTop;
6782}
6783
6784
6785/**
6786 * Calculates the effective stack address for a push of the specified size as
6787 * well as the new temporary RSP value (upper bits may be masked).
6788 *
6789 * @returns Effective stack address for the push.
6790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6791 * @param pCtx Where to get the current stack mode.
6792 * @param pTmpRsp The temporary stack pointer. This is updated.
6793 * @param cbItem The size of the stack item to push.
6794 */
6795DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6796{
6797 RTGCPTR GCPtrTop;
6798
6799 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6800 GCPtrTop = pTmpRsp->u -= cbItem;
6801 else if (pCtx->ss.Attr.n.u1DefBig)
6802 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6803 else
6804 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6805 return GCPtrTop;
6806}
6807
6808
6809/**
6810 * Gets the effective stack address for a pop of the specified size and
6811 * calculates and updates the temporary RSP.
6812 *
6813 * @returns Current stack pointer.
6814 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6815 * @param pCtx Where to get the current stack mode.
6816 * @param pTmpRsp The temporary stack pointer. This is updated.
6817 * @param cbItem The size of the stack item to pop.
6818 */
6819DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6820{
6821 RTGCPTR GCPtrTop;
6822 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6823 {
6824 GCPtrTop = pTmpRsp->u;
6825 pTmpRsp->u += cbItem;
6826 }
6827 else if (pCtx->ss.Attr.n.u1DefBig)
6828 {
6829 GCPtrTop = pTmpRsp->DWords.dw0;
6830 pTmpRsp->DWords.dw0 += cbItem;
6831 }
6832 else
6833 {
6834 GCPtrTop = pTmpRsp->Words.w0;
6835 pTmpRsp->Words.w0 += cbItem;
6836 }
6837 return GCPtrTop;
6838}
6839
6840/** @} */
6841
6842
6843/** @name FPU access and helpers.
6844 *
6845 * @{
6846 */
6847
6848
6849/**
6850 * Hook for preparing to use the host FPU.
6851 *
6852 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6853 *
6854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6855 */
6856DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6857{
6858#ifdef IN_RING3
6859 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6860#else
6861 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6862#endif
6863}
6864
6865
6866/**
6867 * Hook for preparing to use the host FPU for SSE.
6868 *
6869 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6870 *
6871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6872 */
6873DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6874{
6875 iemFpuPrepareUsage(pVCpu);
6876}
6877
6878
6879/**
6880 * Hook for preparing to use the host FPU for AVX.
6881 *
6882 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6883 *
6884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6885 */
6886DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6887{
6888 iemFpuPrepareUsage(pVCpu);
6889}
6890
6891
6892/**
6893 * Hook for actualizing the guest FPU state before the interpreter reads it.
6894 *
6895 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6896 *
6897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6898 */
6899DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6900{
6901#ifdef IN_RING3
6902 NOREF(pVCpu);
6903#else
6904 CPUMRZFpuStateActualizeForRead(pVCpu);
6905#endif
6906}
6907
6908
6909/**
6910 * Hook for actualizing the guest FPU state before the interpreter changes it.
6911 *
6912 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6913 *
6914 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6915 */
6916DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6917{
6918#ifdef IN_RING3
6919 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6920#else
6921 CPUMRZFpuStateActualizeForChange(pVCpu);
6922#endif
6923}
6924
6925
6926/**
6927 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6928 * only.
6929 *
6930 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6931 *
6932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6933 */
6934DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6935{
6936#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6937 NOREF(pVCpu);
6938#else
6939 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6940#endif
6941}
6942
6943
6944/**
6945 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6946 * read+write.
6947 *
6948 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6949 *
6950 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6951 */
6952DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6953{
6954#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6955 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6956#else
6957 CPUMRZFpuStateActualizeForChange(pVCpu);
6958#endif
6959}
6960
6961
6962/**
6963 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6964 * only.
6965 *
6966 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6967 *
6968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6969 */
6970DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6971{
6972#ifdef IN_RING3
6973 NOREF(pVCpu);
6974#else
6975 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6976#endif
6977}
6978
6979
6980/**
6981 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6982 * read+write.
6983 *
6984 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6985 *
6986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6987 */
6988DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
6989{
6990#ifdef IN_RING3
6991 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6992#else
6993 CPUMRZFpuStateActualizeForChange(pVCpu);
6994#endif
6995}
6996
6997
6998/**
6999 * Stores a QNaN value into a FPU register.
7000 *
7001 * @param pReg Pointer to the register.
7002 */
7003DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7004{
7005 pReg->au32[0] = UINT32_C(0x00000000);
7006 pReg->au32[1] = UINT32_C(0xc0000000);
7007 pReg->au16[4] = UINT16_C(0xffff);
7008}
7009
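/*
 * Illustrative sketch (not built): the bit pattern iemFpuStoreQNan assembles is
 * the x87 "real indefinite" QNaN, ffff'c000'0000'0000'0000, i.e. sign set,
 * exponent all ones, and only the two top mantissa bits set.
 */
#if 0
static void iemFpuQNanLayoutExample(void)
{
    RTFLOAT80U r80;
    iemFpuStoreQNan(&r80);
    Assert(r80.s.uExponent == 0x7fff);
    Assert(r80.s.u64Mantissa == UINT64_C(0xc000000000000000));
}
#endif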
7010
7011/**
7012 * Updates the FOP, FPU.CS and FPUIP registers.
7013 *
7014 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7015 * @param pCtx The CPU context.
7016 * @param pFpuCtx The FPU context.
7017 */
7018DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
7019{
7020 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7021 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7022 /** @todo x87.CS and FPUIP need to be kept separately. */
7023 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7024 {
7025 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7026 * happens in real mode here based on the fnsave and fnstenv images. */
7027 pFpuCtx->CS = 0;
7028 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
7029 }
7030 else
7031 {
7032 pFpuCtx->CS = pCtx->cs.Sel;
7033 pFpuCtx->FPUIP = pCtx->rip;
7034 }
7035}
7036
7037
7038/**
7039 * Updates the x87.DS and FPUDP registers.
7040 *
7041 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7042 * @param pCtx The CPU context.
7043 * @param pFpuCtx The FPU context.
7044 * @param iEffSeg The effective segment register.
7045 * @param GCPtrEff The effective address relative to @a iEffSeg.
7046 */
7047DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7048{
7049 RTSEL sel;
7050 switch (iEffSeg)
7051 {
7052 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
7053 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
7054 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
7055 case X86_SREG_ES: sel = pCtx->es.Sel; break;
7056 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
7057 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
7058 default:
7059 AssertMsgFailed(("%d\n", iEffSeg));
7060 sel = pCtx->ds.Sel;
7061 }
7062 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7063 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7064 {
7065 pFpuCtx->DS = 0;
7066 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7067 }
7068 else
7069 {
7070 pFpuCtx->DS = sel;
7071 pFpuCtx->FPUDP = GCPtrEff;
7072 }
7073}
7074
7075
7076/**
7077 * Rotates the stack registers in the push direction.
7078 *
7079 * @param pFpuCtx The FPU context.
7080 * @remarks This is a complete waste of time, but fxsave stores the registers in
7081 * stack order.
7082 */
7083DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7084{
7085 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7086 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7087 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7088 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7089 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7090 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7091 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7092 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7093 pFpuCtx->aRegs[0].r80 = r80Tmp;
7094}
7095
7096
7097/**
7098 * Rotates the stack registers in the pop direction.
7099 *
7100 * @param pFpuCtx The FPU context.
7101 * @remarks This is a complete waste of time, but fxsave stores the registers in
7102 * stack order.
7103 */
7104DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7105{
7106 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7107 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7108 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7109 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7110 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7111 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7112 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7113 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7114 pFpuCtx->aRegs[7].r80 = r80Tmp;
7115}
7116
7117
7118/**
7119 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7120 * exception prevents it.
7121 *
7122 * @param pResult The FPU operation result to push.
7123 * @param pFpuCtx The FPU context.
7124 */
7125IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7126{
7127 /* Update FSW and bail if there are pending exceptions afterwards. */
7128 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7129 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7130 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7131 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7132 {
7133 pFpuCtx->FSW = fFsw;
7134 return;
7135 }
7136
7137 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7138 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7139 {
7140 /* All is fine, push the actual value. */
7141 pFpuCtx->FTW |= RT_BIT(iNewTop);
7142 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7143 }
7144 else if (pFpuCtx->FCW & X86_FCW_IM)
7145 {
7146 /* Masked stack overflow, push QNaN. */
7147 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7148 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7149 }
7150 else
7151 {
7152 /* Raise stack overflow, don't push anything. */
7153 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7154 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7155 return;
7156 }
7157
7158 fFsw &= ~X86_FSW_TOP_MASK;
7159 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7160 pFpuCtx->FSW = fFsw;
7161
7162 iemFpuRotateStackPush(pFpuCtx);
7163}
7164
7165
7166/**
7167 * Stores a result in a FPU register and updates the FSW and FTW.
7168 *
7169 * @param pFpuCtx The FPU context.
7170 * @param pResult The result to store.
7171 * @param iStReg Which FPU register to store it in.
7172 */
7173IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7174{
7175 Assert(iStReg < 8);
7176 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7177 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7178 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7179 pFpuCtx->FTW |= RT_BIT(iReg);
7180 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7181}
7182
7183
7184/**
7185 * Only updates the FPU status word (FSW) with the result of the current
7186 * instruction.
7187 *
7188 * @param pFpuCtx The FPU context.
7189 * @param u16FSW The FSW output of the current instruction.
7190 */
7191IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7192{
7193 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7194 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7195}
7196
7197
7198/**
7199 * Pops one item off the FPU stack if no pending exception prevents it.
7200 *
7201 * @param pFpuCtx The FPU context.
7202 */
7203IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7204{
7205 /* Check pending exceptions. */
7206 uint16_t uFSW = pFpuCtx->FSW;
7207 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7208 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7209 return;
7210
7211 /* TOP++: popping increments the top-of-stack pointer; +9 is +1 mod 8 in the 3-bit field. */
7212 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7213 uFSW &= ~X86_FSW_TOP_MASK;
7214 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7215 pFpuCtx->FSW = uFSW;
7216
7217 /* Mark the previous ST0 as empty. */
7218 iOldTop >>= X86_FSW_TOP_SHIFT;
7219 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7220
7221 /* Rotate the registers. */
7222 iemFpuRotateStackPop(pFpuCtx);
7223}
7224
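/*
 * Illustrative sketch (not built): TOP is a 3-bit field inside FSW, so the
 * helpers above add 7 (== -1 mod 8) when pushing and 9 (== +1 mod 8) when
 * popping and let the mask do the wrap-around instead of branching.
 */
#if 0
static uint16_t iemFswTopAddExample(uint16_t fFsw, uint16_t cAdd)
{
    uint16_t uNewTop = (X86_FSW_TOP_GET(fFsw) + cAdd) & X86_FSW_TOP_SMASK;
    return (uint16_t)((fFsw & ~X86_FSW_TOP_MASK) | (uNewTop << X86_FSW_TOP_SHIFT));
}
#endif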
7225
7226/**
7227 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7228 *
7229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7230 * @param pResult The FPU operation result to push.
7231 */
7232IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7233{
7234 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7235 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7236 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7237 iemFpuMaybePushResult(pResult, pFpuCtx);
7238}
7239
7240
7241/**
7242 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7243 * and sets FPUDP and FPUDS.
7244 *
7245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7246 * @param pResult The FPU operation result to push.
7247 * @param iEffSeg The effective segment register.
7248 * @param GCPtrEff The effective address relative to @a iEffSeg.
7249 */
7250IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7251{
7252 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7253 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7254 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7255 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7256 iemFpuMaybePushResult(pResult, pFpuCtx);
7257}
7258
7259
7260/**
7261 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7262 * unless a pending exception prevents it.
7263 *
7264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7265 * @param pResult The FPU operation result to store and push.
7266 */
7267IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7268{
7269 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7270 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7271 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7272
7273 /* Update FSW and bail if there are pending exceptions afterwards. */
7274 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7275 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7276 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7277 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7278 {
7279 pFpuCtx->FSW = fFsw;
7280 return;
7281 }
7282
7283 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7284 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7285 {
7286 /* All is fine, push the actual value. */
7287 pFpuCtx->FTW |= RT_BIT(iNewTop);
7288 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7289 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7290 }
7291 else if (pFpuCtx->FCW & X86_FCW_IM)
7292 {
7293 /* Masked stack overflow, push QNaN. */
7294 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7295 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7296 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7297 }
7298 else
7299 {
7300 /* Raise stack overflow, don't push anything. */
7301 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7302 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7303 return;
7304 }
7305
7306 fFsw &= ~X86_FSW_TOP_MASK;
7307 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7308 pFpuCtx->FSW = fFsw;
7309
7310 iemFpuRotateStackPush(pFpuCtx);
7311}
7312
7313
7314/**
7315 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7316 * FOP.
7317 *
7318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7319 * @param pResult The result to store.
7320 * @param iStReg Which FPU register to store it in.
7321 */
7322IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7323{
7324 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7325 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7326 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7327 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7328}
7329
7330
7331/**
7332 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7333 * FOP, and then pops the stack.
7334 *
7335 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7336 * @param pResult The result to store.
7337 * @param iStReg Which FPU register to store it in.
7338 */
7339IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7340{
7341 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7342 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7343 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7344 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7345 iemFpuMaybePopOne(pFpuCtx);
7346}
7347
7348
7349/**
7350 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7351 * FPUDP, and FPUDS.
7352 *
7353 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7354 * @param pResult The result to store.
7355 * @param iStReg Which FPU register to store it in.
7356 * @param iEffSeg The effective memory operand selector register.
7357 * @param GCPtrEff The effective memory operand offset.
7358 */
7359IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7360 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7361{
7362 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7363 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7364 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7365 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7366 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7367}
7368
7369
7370/**
7371 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7372 * FPUDP, and FPUDS, and then pops the stack.
7373 *
7374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7375 * @param pResult The result to store.
7376 * @param iStReg Which FPU register to store it in.
7377 * @param iEffSeg The effective memory operand selector register.
7378 * @param GCPtrEff The effective memory operand offset.
7379 */
7380IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7381 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7382{
7383 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7384 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7385 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7386 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7387 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7388 iemFpuMaybePopOne(pFpuCtx);
7389}
7390
7391
7392/**
7393 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7394 *
7395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7396 */
7397IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7398{
7399 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7400 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7401 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7402}
7403
7404
7405/**
7406 * Marks the specified stack register as free (for FFREE).
7407 *
7408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7409 * @param iStReg The register to free.
7410 */
7411IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7412{
7413 Assert(iStReg < 8);
7414 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7415 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7416 pFpuCtx->FTW &= ~RT_BIT(iReg);
7417}
7418
7419
7420/**
7421 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7422 *
7423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7424 */
7425IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7426{
7427 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7428 uint16_t uFsw = pFpuCtx->FSW;
7429 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7430 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7431 uFsw &= ~X86_FSW_TOP_MASK;
7432 uFsw |= uTop;
7433 pFpuCtx->FSW = uFsw;
7434}
7435
7436
7437/**
7438 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7439 *
7440 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7441 */
7442IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7443{
7444 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7445 uint16_t uFsw = pFpuCtx->FSW;
7446 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7447 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7448 uFsw &= ~X86_FSW_TOP_MASK;
7449 uFsw |= uTop;
7450 pFpuCtx->FSW = uFsw;
7451}
7452
7453
7454/**
7455 * Updates the FSW, FOP, FPUIP, and FPUCS.
7456 *
7457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7458 * @param u16FSW The FSW from the current instruction.
7459 */
7460IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7461{
7462 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7463 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7464 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7465 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7466}
7467
7468
7469/**
7470 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7471 *
7472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7473 * @param u16FSW The FSW from the current instruction.
7474 */
7475IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7476{
7477 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7478 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7479 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7480 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7481 iemFpuMaybePopOne(pFpuCtx);
7482}
7483
7484
7485/**
7486 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7487 *
7488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7489 * @param u16FSW The FSW from the current instruction.
7490 * @param iEffSeg The effective memory operand selector register.
7491 * @param GCPtrEff The effective memory operand offset.
7492 */
7493IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7494{
7495 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7496 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7497 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7498 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7499 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7500}
7501
7502
7503/**
7504 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7505 *
7506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7507 * @param u16FSW The FSW from the current instruction.
7508 */
7509IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7510{
7511 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7512 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7513 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7514 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7515 iemFpuMaybePopOne(pFpuCtx);
7516 iemFpuMaybePopOne(pFpuCtx);
7517}
7518
7519
7520/**
7521 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7522 *
7523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7524 * @param u16FSW The FSW from the current instruction.
7525 * @param iEffSeg The effective memory operand selector register.
7526 * @param GCPtrEff The effective memory operand offset.
7527 */
7528IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7529{
7530 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7531 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7532 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7533 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7534 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7535 iemFpuMaybePopOne(pFpuCtx);
7536}
7537
7538
7539/**
7540 * Worker routine for raising an FPU stack underflow exception.
7541 *
7542 * @param pFpuCtx The FPU context.
7543 * @param iStReg The stack register being accessed.
7544 */
7545IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7546{
7547 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7548 if (pFpuCtx->FCW & X86_FCW_IM)
7549 {
7550 /* Masked underflow. */
7551 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7552 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7553 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7554 if (iStReg != UINT8_MAX)
7555 {
7556 pFpuCtx->FTW |= RT_BIT(iReg);
7557 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7558 }
7559 }
7560 else
7561 {
7562 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7563 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7564 }
7565}
7566
7567
7568/**
7569 * Raises a FPU stack underflow exception.
7570 *
7571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7572 * @param iStReg The destination register that should be loaded
7573 * with QNaN if \#IS is not masked. Specify
7574 * UINT8_MAX if none (like for fcom).
7575 */
7576DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7577{
7578 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7579 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7580 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7581 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7582}
7583
7584
7585DECL_NO_INLINE(IEM_STATIC, void)
7586iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7587{
7588 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7589 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7590 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7591 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7592 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7593}
7594
7595
7596DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7597{
7598 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7599 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7600 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7601 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7602 iemFpuMaybePopOne(pFpuCtx);
7603}
7604
7605
7606DECL_NO_INLINE(IEM_STATIC, void)
7607iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7608{
7609 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7610 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7611 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7612 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7613 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7614 iemFpuMaybePopOne(pFpuCtx);
7615}
7616
7617
7618DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7619{
7620 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7621 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7622 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7623 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7624 iemFpuMaybePopOne(pFpuCtx);
7625 iemFpuMaybePopOne(pFpuCtx);
7626}
7627
7628
7629DECL_NO_INLINE(IEM_STATIC, void)
7630iemFpuStackPushUnderflow(PVMCPU pVCpu)
7631{
7632 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7633 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7634 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7635
7636 if (pFpuCtx->FCW & X86_FCW_IM)
7637 {
7638 /* Masked underflow - Push QNaN. */
7639 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7640 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7641 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7642 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7643 pFpuCtx->FTW |= RT_BIT(iNewTop);
7644 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7645 iemFpuRotateStackPush(pFpuCtx);
7646 }
7647 else
7648 {
7649 /* Exception pending - don't change TOP or the register stack. */
7650 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7651 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7652 }
7653}
7654
7655
7656DECL_NO_INLINE(IEM_STATIC, void)
7657iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7658{
7659 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7660 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7661 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7662
7663 if (pFpuCtx->FCW & X86_FCW_IM)
7664 {
7665 /* Masked underflow - Push QNaN. */
7666 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7667 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7668 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7669 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7670 pFpuCtx->FTW |= RT_BIT(iNewTop);
7671 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7672 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7673 iemFpuRotateStackPush(pFpuCtx);
7674 }
7675 else
7676 {
7677 /* Exception pending - don't change TOP or the register stack. */
7678 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7679 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7680 }
7681}
7682
7683
7684/**
7685 * Worker routine for raising an FPU stack overflow exception on a push.
7686 *
7687 * @param pFpuCtx The FPU context.
7688 */
7689IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7690{
7691 if (pFpuCtx->FCW & X86_FCW_IM)
7692 {
7693 /* Masked overflow. */
7694 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7695 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7696 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7697 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7698 pFpuCtx->FTW |= RT_BIT(iNewTop);
7699 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7700 iemFpuRotateStackPush(pFpuCtx);
7701 }
7702 else
7703 {
7704 /* Exception pending - don't change TOP or the register stack. */
7705 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7706 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7707 }
7708}
7709
7710
7711/**
7712 * Raises a FPU stack overflow exception on a push.
7713 *
7714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7715 */
7716DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7717{
7718 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7719 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7720 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7721 iemFpuStackPushOverflowOnly(pFpuCtx);
7722}
7723
7724
7725/**
7726 * Raises a FPU stack overflow exception on a push with a memory operand.
7727 *
7728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7729 * @param iEffSeg The effective memory operand selector register.
7730 * @param GCPtrEff The effective memory operand offset.
7731 */
7732DECL_NO_INLINE(IEM_STATIC, void)
7733iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7734{
7735 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7736 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7737 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7738 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7739 iemFpuStackPushOverflowOnly(pFpuCtx);
7740}
7741
7742
7743IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7744{
7745 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7746 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7747 if (pFpuCtx->FTW & RT_BIT(iReg))
7748 return VINF_SUCCESS;
7749 return VERR_NOT_FOUND;
7750}
7751
7752
7753IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7754{
7755 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7756 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7757 if (pFpuCtx->FTW & RT_BIT(iReg))
7758 {
7759 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7760 return VINF_SUCCESS;
7761 }
7762 return VERR_NOT_FOUND;
7763}
7764
7765
7766IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7767 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7768{
7769 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7770 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7771 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7772 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7773 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7774 {
7775 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7776 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7777 return VINF_SUCCESS;
7778 }
7779 return VERR_NOT_FOUND;
7780}
7781
7782
7783IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7784{
7785 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7786 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7787 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7788 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7789 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7790 {
7791 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7792 return VINF_SUCCESS;
7793 }
7794 return VERR_NOT_FOUND;
7795}
7796
7797
7798/**
7799 * Updates the FPU exception status after FCW is changed.
7800 *
7801 * @param pFpuCtx The FPU context.
7802 */
7803IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7804{
7805 uint16_t u16Fsw = pFpuCtx->FSW;
7806 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7807 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7808 else
7809 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7810 pFpuCtx->FSW = u16Fsw;
7811}
7812
7813
7814/**
7815 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7816 *
7817 * @returns The full FTW.
7818 * @param pFpuCtx The FPU context.
7819 */
7820IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7821{
7822 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7823 uint16_t u16Ftw = 0;
7824 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7825 for (unsigned iSt = 0; iSt < 8; iSt++)
7826 {
7827 unsigned const iReg = (iSt + iTop) & 7;
7828 if (!(u8Ftw & RT_BIT(iReg)))
7829 u16Ftw |= 3 << (iReg * 2); /* empty */
7830 else
7831 {
7832 uint16_t uTag;
7833 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7834 if (pr80Reg->s.uExponent == 0x7fff)
7835 uTag = 2; /* Exponent is all 1's => Special. */
7836 else if (pr80Reg->s.uExponent == 0x0000)
7837 {
7838 if (pr80Reg->s.u64Mantissa == 0x0000)
7839 uTag = 1; /* All bits are zero => Zero. */
7840 else
7841 uTag = 2; /* Must be special. */
7842 }
7843 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7844 uTag = 0; /* Valid. */
7845 else
7846 uTag = 2; /* Must be special. */
7847
7848 u16Ftw |= uTag << (iReg * 2);
7849 }
7850 }
7851
7852 return u16Ftw;
7853}
7854
7855
7856/**
7857 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7858 *
7859 * @returns The compressed FTW.
7860 * @param u16FullFtw The full FTW to convert.
7861 */
7862IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7863{
7864 uint8_t u8Ftw = 0;
7865 for (unsigned i = 0; i < 8; i++)
7866 {
7867 if ((u16FullFtw & 3) != 3 /*empty*/)
7868 u8Ftw |= RT_BIT(i);
7869 u16FullFtw >>= 2;
7870 }
7871
7872 return u8Ftw;
7873}
7874
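/*
 * Illustrative sketch (not built): a round trip of the two tag word formats.
 * The full FTW uses two bits per register (0=valid, 1=zero, 2=special,
 * 3=empty); the compressed FXSAVE form keeps a single "not empty" bit per
 * register, which is all iemFpuCompressFtw preserves.
 */
#if 0
static void iemFtwCompressExample(void)
{
    /* Only physical register 0 holds something (tag 2 = special); the rest are empty. */
    uint16_t const u16Full = UINT16_C(0xfffe);
    Assert(iemFpuCompressFtw(u16Full) == UINT16_C(0x0001));
}
#endif
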
7875/** @} */
7876
7877
7878/** @name Memory access.
7879 *
7880 * @{
7881 */
7882
7883
7884/**
7885 * Updates the IEMCPU::cbWritten counter if applicable.
7886 *
7887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7888 * @param fAccess The access being accounted for.
7889 * @param cbMem The access size.
7890 */
7891DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7892{
7893 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7894 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7895 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7896}
7897
7898
7899/**
7900 * Checks if the given segment can be written to, raise the appropriate
7901 * exception if not.
7902 *
7903 * @returns VBox strict status code.
7904 *
7905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7906 * @param pHid Pointer to the hidden register.
7907 * @param iSegReg The register number.
7908 * @param pu64BaseAddr Where to return the base address to use for the
7909 * segment. (In 64-bit code it may differ from the
7910 * base in the hidden segment.)
7911 */
7912IEM_STATIC VBOXSTRICTRC
7913iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7914{
7915 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7916 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7917 else
7918 {
7919 if (!pHid->Attr.n.u1Present)
7920 {
7921 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7922 AssertRelease(uSel == 0);
7923 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7924 return iemRaiseGeneralProtectionFault0(pVCpu);
7925 }
7926
7927 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7928 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7929 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7930 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7931 *pu64BaseAddr = pHid->u64Base;
7932 }
7933 return VINF_SUCCESS;
7934}
7935
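/*
 * Illustrative sketch (not built): in 64-bit mode only FS and GS contribute a
 * segment base; ES, CS, SS and DS are treated as flat with base 0. That is
 * what the "iSegReg < X86_SREG_FS" test above encodes, given the register
 * numbering ES=0, CS=1, SS=2, DS=3, FS=4, GS=5.
 */
#if 0
static uint64_t iemSegBaseIn64BitSketch(uint8_t iSegReg, uint64_t u64HidBase)
{
    return iSegReg < X86_SREG_FS ? 0 : u64HidBase;
}
#endif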
7936
7937/**
7938 * Checks if the given segment can be read from, raise the appropriate
7939 * exception if not.
7940 *
7941 * @returns VBox strict status code.
7942 *
7943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7944 * @param pHid Pointer to the hidden register.
7945 * @param iSegReg The register number.
7946 * @param pu64BaseAddr Where to return the base address to use for the
7947 * segment. (In 64-bit code it may differ from the
7948 * base in the hidden segment.)
7949 */
7950IEM_STATIC VBOXSTRICTRC
7951iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7952{
7953 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7954 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7955 else
7956 {
7957 if (!pHid->Attr.n.u1Present)
7958 {
7959 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7960 AssertRelease(uSel == 0);
7961 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7962 return iemRaiseGeneralProtectionFault0(pVCpu);
7963 }
7964
7965 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7966 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7967 *pu64BaseAddr = pHid->u64Base;
7968 }
7969 return VINF_SUCCESS;
7970}
7971
7972
7973/**
7974 * Applies the segment limit, base and attributes.
7975 *
7976 * This may raise a \#GP or \#SS.
7977 *
7978 * @returns VBox strict status code.
7979 *
7980 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7981 * @param fAccess The kind of access which is being performed.
7982 * @param iSegReg The index of the segment register to apply.
7983 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7984 * TSS, ++).
7985 * @param cbMem The access size.
7986 * @param pGCPtrMem Pointer to the guest memory address to apply
7987 * segmentation to. Input and output parameter.
7988 */
7989IEM_STATIC VBOXSTRICTRC
7990iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7991{
7992 if (iSegReg == UINT8_MAX)
7993 return VINF_SUCCESS;
7994
7995 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7996 switch (pVCpu->iem.s.enmCpuMode)
7997 {
7998 case IEMMODE_16BIT:
7999 case IEMMODE_32BIT:
8000 {
8001 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8002 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8003
8004 if ( pSel->Attr.n.u1Present
8005 && !pSel->Attr.n.u1Unusable)
8006 {
8007 Assert(pSel->Attr.n.u1DescType);
8008 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8009 {
8010 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8011 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8012 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8013
8014 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8015 {
8016 /** @todo CPL check. */
8017 }
8018
8019 /*
8020 * There are two kinds of data selectors, normal and expand down.
8021 */
8022 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8023 {
8024 if ( GCPtrFirst32 > pSel->u32Limit
8025 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8026 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8027 }
8028 else
8029 {
8030 /*
8031 * The upper boundary is defined by the B bit, not the G bit!
8032 */
8033 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8034 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8035 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8036 }
8037 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8038 }
8039 else
8040 {
8041
8042 /*
8043 * A code selector can usually be used to read through; writing is
8044 * only permitted in real and V8086 mode.
8045 */
8046 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8047 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8048 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8049 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8050 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8051
8052 if ( GCPtrFirst32 > pSel->u32Limit
8053 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8054 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8055
8056 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8057 {
8058 /** @todo CPL check. */
8059 }
8060
8061 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8062 }
8063 }
8064 else
8065 return iemRaiseGeneralProtectionFault0(pVCpu);
8066 return VINF_SUCCESS;
8067 }
8068
8069 case IEMMODE_64BIT:
8070 {
8071 RTGCPTR GCPtrMem = *pGCPtrMem;
8072 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8073 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8074
8075 Assert(cbMem >= 1);
8076 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8077 return VINF_SUCCESS;
8078 return iemRaiseGeneralProtectionFault0(pVCpu);
8079 }
8080
8081 default:
8082 AssertFailedReturn(VERR_IEM_IPE_7);
8083 }
8084}
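
/*
 * A quick numeric sketch of the expand-down data segment check above
 * (illustration only; the limit value is made up): for expand-down segments
 * the limit marks the *lower* boundary, while the upper boundary comes from
 * the D/B bit (0xffff or 0xffffffff).
 *
 * @code
 *    uint32_t const uLimit   = UINT32_C(0x00000fff);  // hypothetical expand-down limit
 *    uint32_t const GCPtrOk  = UINT32_C(0x00001000);  // first offset above the limit -> allowed
 *    uint32_t const GCPtrBad = UINT32_C(0x00000fff);  // at/below the limit -> #GP/#SS
 *    Assert(GCPtrOk  >= uLimit + UINT32_C(1));
 *    Assert(GCPtrBad <  uLimit + UINT32_C(1));
 * @endcode
 */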
8085
8086
8087/**
8088 * Translates a virtual address to a physical address and checks if we
8089 * can access the page as specified.
8090 *
8091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8092 * @param GCPtrMem The virtual address.
8093 * @param fAccess The intended access.
8094 * @param pGCPhysMem Where to return the physical address.
8095 */
8096IEM_STATIC VBOXSTRICTRC
8097iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8098{
8099 /** @todo Need a different PGM interface here. We're currently using
8100 * generic / REM interfaces. This won't cut it for R0 & RC. */
8101 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8102 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8103 RTGCPHYS GCPhys;
8104 uint64_t fFlags;
8105 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8106 if (RT_FAILURE(rc))
8107 {
8108 /** @todo Check unassigned memory in unpaged mode. */
8109 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8110 *pGCPhysMem = NIL_RTGCPHYS;
8111 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8112 }
8113
8114 /* If the page is writable, user-accessible and does not have the no-exec bit
8115 set, all access is allowed. Otherwise we'll have to check more carefully... */
8116 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8117 {
8118 /* Write to read only memory? */
8119 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8120 && !(fFlags & X86_PTE_RW)
8121 && ( (pVCpu->iem.s.uCpl == 3
8122 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8123 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8124 {
8125 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8126 *pGCPhysMem = NIL_RTGCPHYS;
8127 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8128 }
8129
8130 /* Kernel memory accessed by userland? */
8131 if ( !(fFlags & X86_PTE_US)
8132 && pVCpu->iem.s.uCpl == 3
8133 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8134 {
8135 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8136 *pGCPhysMem = NIL_RTGCPHYS;
8137 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8138 }
8139
8140 /* Executing non-executable memory? */
8141 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8142 && (fFlags & X86_PTE_PAE_NX)
8143 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8144 {
8145 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8146 *pGCPhysMem = NIL_RTGCPHYS;
8147 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8148 VERR_ACCESS_DENIED);
8149 }
8150 }
8151
8152 /*
8153 * Set the dirty / access flags.
8154 * ASSUMES these are set when the address is translated rather than on commit...
8155 */
8156 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8157 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8158 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8159 {
8160 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8161 AssertRC(rc2);
8162 }
8163
8164 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8165 *pGCPhysMem = GCPhys;
8166 return VINF_SUCCESS;
8167}
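
/*
 * The write-protection rule above in isolation (a boolean sketch with
 * assumed inputs, not a call into the function): a write to a page with a
 * clear R/W bit only faults when it comes from ring-3 (and isn't a system
 * structure access), or from supervisor code with CR0.WP set.
 *
 * @code
 *    bool const fPteRw  = false;  // X86_PTE_RW clear in the PTE
 *    bool const fRing3  = true;   // uCpl == 3 and not IEM_ACCESS_WHAT_SYS
 *    bool const fCr0Wp  = false;  // CR0.WP clear
 *    bool const fFaults = !fPteRw && (fRing3 || fCr0Wp);
 *    Assert(fFaults);             // ring-3 write to a read-only page -> #PF
 * @endcode
 */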
8168
8169
8170
8171/**
8172 * Maps a physical page.
8173 *
8174 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8176 * @param GCPhysMem The physical address.
8177 * @param fAccess The intended access.
8178 * @param ppvMem Where to return the mapping address.
8179 * @param pLock The PGM lock.
8180 */
8181IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8182{
8183#ifdef IEM_VERIFICATION_MODE_FULL
8184 /* Force the alternative path so we can ignore writes. */
8185 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8186 {
8187 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8188 {
8189 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8190 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8191 if (RT_FAILURE(rc2))
8192 pVCpu->iem.s.fProblematicMemory = true;
8193 }
8194 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8195 }
8196#endif
8197#ifdef IEM_LOG_MEMORY_WRITES
8198 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8199 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8200#endif
8201#ifdef IEM_VERIFICATION_MODE_MINIMAL
8202 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8203#endif
8204
8205 /** @todo This API may require some improvement later. A private deal with PGM
8206 * regarding locking and unlocking needs to be struck. A couple of TLBs
8207 * living in PGM, but with publicly accessible inlined access methods
8208 * could perhaps be an even better solution. */
8209 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8210 GCPhysMem,
8211 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8212 pVCpu->iem.s.fBypassHandlers,
8213 ppvMem,
8214 pLock);
8215 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8216 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8217
8218#ifdef IEM_VERIFICATION_MODE_FULL
8219 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8220 pVCpu->iem.s.fProblematicMemory = true;
8221#endif
8222 return rc;
8223}
8224
8225
8226/**
8227 * Unmap a page previously mapped by iemMemPageMap.
8228 *
8229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8230 * @param GCPhysMem The physical address.
8231 * @param fAccess The intended access.
8232 * @param pvMem What iemMemPageMap returned.
8233 * @param pLock The PGM lock.
8234 */
8235DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8236{
8237 NOREF(pVCpu);
8238 NOREF(GCPhysMem);
8239 NOREF(fAccess);
8240 NOREF(pvMem);
8241 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8242}
8243
8244
8245/**
8246 * Looks up a memory mapping entry.
8247 *
8248 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8250 * @param pvMem The memory address.
8251 * @param fAccess The access to look up.
8252 */
8253DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8254{
8255 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8256 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8257 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8258 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8259 return 0;
8260 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8261 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8262 return 1;
8263 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8264 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8265 return 2;
8266 return VERR_NOT_FOUND;
8267}
8268
8269
8270/**
8271 * Finds a free memmap entry when using iNextMapping doesn't work.
8272 *
8273 * @returns Memory mapping index, 1024 on failure.
8274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8275 */
8276IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8277{
8278 /*
8279 * The easy case.
8280 */
8281 if (pVCpu->iem.s.cActiveMappings == 0)
8282 {
8283 pVCpu->iem.s.iNextMapping = 1;
8284 return 0;
8285 }
8286
8287 /* There should be enough mappings for all instructions. */
8288 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8289
8290 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8291 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8292 return i;
8293
8294 AssertFailedReturn(1024);
8295}
8296
8297
8298/**
8299 * Commits a bounce buffer that needs writing back and unmaps it.
8300 *
8301 * @returns Strict VBox status code.
8302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8303 * @param iMemMap The index of the buffer to commit.
8304 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8305 * Always false in ring-3, obviously.
8306 */
8307IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8308{
8309 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8310 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8311#ifdef IN_RING3
8312 Assert(!fPostponeFail);
8313 RT_NOREF_PV(fPostponeFail);
8314#endif
8315
8316 /*
8317 * Do the writing.
8318 */
8319#ifndef IEM_VERIFICATION_MODE_MINIMAL
8320 PVM pVM = pVCpu->CTX_SUFF(pVM);
8321 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8322 && !IEM_VERIFICATION_ENABLED(pVCpu))
8323 {
8324 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8325 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8326 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8327 if (!pVCpu->iem.s.fBypassHandlers)
8328 {
8329 /*
8330 * Carefully and efficiently dealing with access handler return
8331 * codes makes this a little bloated.
8332 */
8333 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8334 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8335 pbBuf,
8336 cbFirst,
8337 PGMACCESSORIGIN_IEM);
8338 if (rcStrict == VINF_SUCCESS)
8339 {
8340 if (cbSecond)
8341 {
8342 rcStrict = PGMPhysWrite(pVM,
8343 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8344 pbBuf + cbFirst,
8345 cbSecond,
8346 PGMACCESSORIGIN_IEM);
8347 if (rcStrict == VINF_SUCCESS)
8348 { /* nothing */ }
8349 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8350 {
8351 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8352 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8353 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8354 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8355 }
8356# ifndef IN_RING3
8357 else if (fPostponeFail)
8358 {
8359 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8360 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8361 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8362 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8363 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8364 return iemSetPassUpStatus(pVCpu, rcStrict);
8365 }
8366# endif
8367 else
8368 {
8369 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8370 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8371 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8372 return rcStrict;
8373 }
8374 }
8375 }
8376 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8377 {
8378 if (!cbSecond)
8379 {
8380 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8381 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8382 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8383 }
8384 else
8385 {
8386 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8387 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8388 pbBuf + cbFirst,
8389 cbSecond,
8390 PGMACCESSORIGIN_IEM);
8391 if (rcStrict2 == VINF_SUCCESS)
8392 {
8393 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8394 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8395 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8396 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8397 }
8398 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8399 {
8400 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8401 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8402 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8403 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8404 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8405 }
8406# ifndef IN_RING3
8407 else if (fPostponeFail)
8408 {
8409 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8410 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8411 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8412 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8413 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8414 return iemSetPassUpStatus(pVCpu, rcStrict);
8415 }
8416# endif
8417 else
8418 {
8419 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8420 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8421 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8422 return rcStrict2;
8423 }
8424 }
8425 }
8426# ifndef IN_RING3
8427 else if (fPostponeFail)
8428 {
8429 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8430 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8431 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8432 if (!cbSecond)
8433 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8434 else
8435 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8436 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8437 return iemSetPassUpStatus(pVCpu, rcStrict);
8438 }
8439# endif
8440 else
8441 {
8442 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8443 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8444 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8445 return rcStrict;
8446 }
8447 }
8448 else
8449 {
8450 /*
8451 * No access handlers, much simpler.
8452 */
8453 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8454 if (RT_SUCCESS(rc))
8455 {
8456 if (cbSecond)
8457 {
8458 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8459 if (RT_SUCCESS(rc))
8460 { /* likely */ }
8461 else
8462 {
8463 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8464 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8465 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8466 return rc;
8467 }
8468 }
8469 }
8470 else
8471 {
8472 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8473 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8474 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8475 return rc;
8476 }
8477 }
8478 }
8479#endif
8480
8481#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8482 /*
8483 * Record the write(s).
8484 */
8485 if (!pVCpu->iem.s.fNoRem)
8486 {
8487 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8488 if (pEvtRec)
8489 {
8490 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8491 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8492 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8493 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8494 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8495 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8496 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8497 }
8498 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8499 {
8500 pEvtRec = iemVerifyAllocRecord(pVCpu);
8501 if (pEvtRec)
8502 {
8503 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8504 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8505 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8506 memcpy(pEvtRec->u.RamWrite.ab,
8507 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8508 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8509 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8510 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8511 }
8512 }
8513 }
8514#endif
8515#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8516 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8517 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8518 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8519 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8520 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8521 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8522
8523 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8524 g_cbIemWrote = cbWrote;
8525 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8526#endif
8527
8528 /*
8529 * Free the mapping entry.
8530 */
8531 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8532 Assert(pVCpu->iem.s.cActiveMappings != 0);
8533 pVCpu->iem.s.cActiveMappings--;
8534 return VINF_SUCCESS;
8535}
8536
8537
8538/**
8539 * iemMemMap worker that deals with a request crossing pages.
8540 */
8541IEM_STATIC VBOXSTRICTRC
8542iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8543{
8544 /*
8545 * Do the address translations.
8546 */
8547 RTGCPHYS GCPhysFirst;
8548 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8549 if (rcStrict != VINF_SUCCESS)
8550 return rcStrict;
8551
8552 RTGCPHYS GCPhysSecond;
8553 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8554 fAccess, &GCPhysSecond);
8555 if (rcStrict != VINF_SUCCESS)
8556 return rcStrict;
8557 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8558
8559 PVM pVM = pVCpu->CTX_SUFF(pVM);
8560#ifdef IEM_VERIFICATION_MODE_FULL
8561 /*
8562 * Detect problematic memory when verifying so we can select
8563 * the right execution engine. (TLB: Redo this.)
8564 */
8565 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8566 {
8567 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8568 if (RT_SUCCESS(rc2))
8569 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8570 if (RT_FAILURE(rc2))
8571 pVCpu->iem.s.fProblematicMemory = true;
8572 }
8573#endif
8574
8575
8576 /*
8577 * Read in the current memory content if it's a read, execute or partial
8578 * write access.
8579 */
8580 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8581 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8582 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8583
8584 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8585 {
8586 if (!pVCpu->iem.s.fBypassHandlers)
8587 {
8588 /*
8589 * Must carefully deal with access handler status codes here,
8590 * which makes the code a bit bloated.
8591 */
8592 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8593 if (rcStrict == VINF_SUCCESS)
8594 {
8595 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8596 if (rcStrict == VINF_SUCCESS)
8597 { /*likely */ }
8598 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8599 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8600 else
8601 {
8602 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8603 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8604 return rcStrict;
8605 }
8606 }
8607 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8608 {
8609 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8610 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8611 {
8612 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8613 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8614 }
8615 else
8616 {
8617 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8618 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8619 return rcStrict2;
8620 }
8621 }
8622 else
8623 {
8624 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8625 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8626 return rcStrict;
8627 }
8628 }
8629 else
8630 {
8631 /*
8632 * No informational status codes here, much more straightforward.
8633 */
8634 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8635 if (RT_SUCCESS(rc))
8636 {
8637 Assert(rc == VINF_SUCCESS);
8638 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8639 if (RT_SUCCESS(rc))
8640 Assert(rc == VINF_SUCCESS);
8641 else
8642 {
8643 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8644 return rc;
8645 }
8646 }
8647 else
8648 {
8649 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8650 return rc;
8651 }
8652 }
8653
8654#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8655 if ( !pVCpu->iem.s.fNoRem
8656 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8657 {
8658 /*
8659 * Record the reads.
8660 */
8661 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8662 if (pEvtRec)
8663 {
8664 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8665 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8666 pEvtRec->u.RamRead.cb = cbFirstPage;
8667 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8668 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8669 }
8670 pEvtRec = iemVerifyAllocRecord(pVCpu);
8671 if (pEvtRec)
8672 {
8673 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8674 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8675 pEvtRec->u.RamRead.cb = cbSecondPage;
8676 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8677 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8678 }
8679 }
8680#endif
8681 }
8682#ifdef VBOX_STRICT
8683 else
8684 memset(pbBuf, 0xcc, cbMem);
8685 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8686 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8687#endif
8688
8689 /*
8690 * Commit the bounce buffer entry.
8691 */
8692 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8693 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8694 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8695 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8696 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8697 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8698 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8699 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8700 pVCpu->iem.s.cActiveMappings++;
8701
8702 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8703 *ppvMem = pbBuf;
8704 return VINF_SUCCESS;
8705}
8706
8707
8708/**
8709 * iemMemMap worker that deals with iemMemPageMap failures.
8710 */
8711IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8712 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8713{
8714 /*
8715 * Filter out conditions we can handle and the ones which shouldn't happen.
8716 */
8717 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8718 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8719 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8720 {
8721 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8722 return rcMap;
8723 }
8724 pVCpu->iem.s.cPotentialExits++;
8725
8726 /*
8727 * Read in the current memory content if it's a read, execute or partial
8728 * write access.
8729 */
8730 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8731 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8732 {
8733 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8734 memset(pbBuf, 0xff, cbMem);
8735 else
8736 {
8737 int rc;
8738 if (!pVCpu->iem.s.fBypassHandlers)
8739 {
8740 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8741 if (rcStrict == VINF_SUCCESS)
8742 { /* nothing */ }
8743 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8744 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8745 else
8746 {
8747 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8748 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8749 return rcStrict;
8750 }
8751 }
8752 else
8753 {
8754 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8755 if (RT_SUCCESS(rc))
8756 { /* likely */ }
8757 else
8758 {
8759 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8760 GCPhysFirst, rc));
8761 return rc;
8762 }
8763 }
8764 }
8765
8766#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8767 if ( !pVCpu->iem.s.fNoRem
8768 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8769 {
8770 /*
8771 * Record the read.
8772 */
8773 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8774 if (pEvtRec)
8775 {
8776 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8777 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8778 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8779 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8780 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8781 }
8782 }
8783#endif
8784 }
8785#ifdef VBOX_STRICT
8786 else
8787 memset(pbBuf, 0xcc, cbMem);
8788#endif
8789#ifdef VBOX_STRICT
8790 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8791 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8792#endif
8793
8794 /*
8795 * Commit the bounce buffer entry.
8796 */
8797 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8798 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8799 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8800 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8801 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8802 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8803 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8804 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8805 pVCpu->iem.s.cActiveMappings++;
8806
8807 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8808 *ppvMem = pbBuf;
8809 return VINF_SUCCESS;
8810}
8811
8812
8813
8814/**
8815 * Maps the specified guest memory for the given kind of access.
8816 *
8817 * This may be using bounce buffering of the memory if it's crossing a page
8818 * boundary or if there is an access handler installed for any of it. Because
8819 * of lock prefix guarantees, we're in for some extra clutter when this
8820 * happens.
8821 *
8822 * This may raise a \#GP, \#SS, \#PF or \#AC.
8823 *
8824 * @returns VBox strict status code.
8825 *
8826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8827 * @param ppvMem Where to return the pointer to the mapped
8828 * memory.
8829 * @param cbMem The number of bytes to map. This is usually 1,
8830 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8831 * string operations it can be up to a page.
8832 * @param iSegReg The index of the segment register to use for
8833 * this access. The base and limits are checked.
8834 * Use UINT8_MAX to indicate that no segmentation
8835 * is required (for IDT, GDT and LDT accesses).
8836 * @param GCPtrMem The address of the guest memory.
8837 * @param fAccess How the memory is being accessed. The
8838 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8839 * how to map the memory, while the
8840 * IEM_ACCESS_WHAT_XXX bit is used when raising
8841 * exceptions.
8842 */
8843IEM_STATIC VBOXSTRICTRC
8844iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8845{
8846 /*
8847 * Check the input and figure out which mapping entry to use.
8848 */
8849 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8850 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8851 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8852
8853 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8854 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8855 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8856 {
8857 iMemMap = iemMemMapFindFree(pVCpu);
8858 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8859 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8860 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8861 pVCpu->iem.s.aMemMappings[2].fAccess),
8862 VERR_IEM_IPE_9);
8863 }
8864
8865 /*
8866 * Map the memory, checking that we can actually access it. If something
8867 * slightly complicated happens, fall back on bounce buffering.
8868 */
8869 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8870 if (rcStrict != VINF_SUCCESS)
8871 return rcStrict;
8872
8873 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8874 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8875
8876 RTGCPHYS GCPhysFirst;
8877 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8878 if (rcStrict != VINF_SUCCESS)
8879 return rcStrict;
8880
8881 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8882 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8883 if (fAccess & IEM_ACCESS_TYPE_READ)
8884 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8885
8886 void *pvMem;
8887 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8888 if (rcStrict != VINF_SUCCESS)
8889 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8890
8891 /*
8892 * Fill in the mapping table entry.
8893 */
8894 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8895 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8896 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8897 pVCpu->iem.s.cActiveMappings++;
8898
8899 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8900 *ppvMem = pvMem;
8901 return VINF_SUCCESS;
8902}
8903
8904
8905/**
8906 * Commits the guest memory if bounce buffered and unmaps it.
8907 *
8908 * @returns Strict VBox status code.
8909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8910 * @param pvMem The mapping.
8911 * @param fAccess The kind of access.
8912 */
8913IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8914{
8915 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8916 AssertReturn(iMemMap >= 0, iMemMap);
8917
8918 /* If it's bounce buffered, we may need to write back the buffer. */
8919 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8920 {
8921 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8922 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8923 }
8924 /* Otherwise unlock it. */
8925 else
8926 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8927
8928 /* Free the entry. */
8929 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8930 Assert(pVCpu->iem.s.cActiveMappings != 0);
8931 pVCpu->iem.s.cActiveMappings--;
8932 return VINF_SUCCESS;
8933}
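
/*
 * The typical caller pattern for the iemMemMap / iemMemCommitAndUnmap pair
 * above (a sketch; iSegReg, GCPtrMem and u32Value stand in for the caller's
 * operands). The data fetch and store helpers further down follow exactly
 * this shape.
 *
 * @code
 *    uint32_t *pu32Dst;
 *    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                      iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
 *    if (rcStrict == VINF_SUCCESS)
 *    {
 *        *pu32Dst = u32Value;
 *        rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *    }
 *    return rcStrict;
 * @endcode
 */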
8934
8935#ifdef IEM_WITH_SETJMP
8936
8937/**
8938 * Maps the specified guest memory for the given kind of access, longjmp on
8939 * error.
8940 *
8941 * This may be using bounce buffering of the memory if it's crossing a page
8942 * boundary or if there is an access handler installed for any of it. Because
8943 * of lock prefix guarantees, we're in for some extra clutter when this
8944 * happens.
8945 *
8946 * This may raise a \#GP, \#SS, \#PF or \#AC.
8947 *
8948 * @returns Pointer to the mapped memory.
8949 *
8950 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8951 * @param cbMem The number of bytes to map. This is usually 1,
8952 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8953 * string operations it can be up to a page.
8954 * @param iSegReg The index of the segment register to use for
8955 * this access. The base and limits are checked.
8956 * Use UINT8_MAX to indicate that no segmentation
8957 * is required (for IDT, GDT and LDT accesses).
8958 * @param GCPtrMem The address of the guest memory.
8959 * @param fAccess How the memory is being accessed. The
8960 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8961 * how to map the memory, while the
8962 * IEM_ACCESS_WHAT_XXX bit is used when raising
8963 * exceptions.
8964 */
8965IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8966{
8967 /*
8968 * Check the input and figure out which mapping entry to use.
8969 */
8970 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8971 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8972 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8973
8974 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8975 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8976 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8977 {
8978 iMemMap = iemMemMapFindFree(pVCpu);
8979 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8980 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8981 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8982 pVCpu->iem.s.aMemMappings[2].fAccess),
8983 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8984 }
8985
8986 /*
8987 * Map the memory, checking that we can actually access it. If something
8988 * slightly complicated happens, fall back on bounce buffering.
8989 */
8990 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8991 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8992 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8993
8994 /* Crossing a page boundary? */
8995 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8996 { /* No (likely). */ }
8997 else
8998 {
8999 void *pvMem;
9000 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9001 if (rcStrict == VINF_SUCCESS)
9002 return pvMem;
9003 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9004 }
9005
9006 RTGCPHYS GCPhysFirst;
9007 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9008 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9009 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9010
9011 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9012 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9013 if (fAccess & IEM_ACCESS_TYPE_READ)
9014 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9015
9016 void *pvMem;
9017 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9018 if (rcStrict == VINF_SUCCESS)
9019 { /* likely */ }
9020 else
9021 {
9022 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9023 if (rcStrict == VINF_SUCCESS)
9024 return pvMem;
9025 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9026 }
9027
9028 /*
9029 * Fill in the mapping table entry.
9030 */
9031 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9032 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9033 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9034 pVCpu->iem.s.cActiveMappings++;
9035
9036 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9037 return pvMem;
9038}
9039
9040
9041/**
9042 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9043 *
9044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9045 * @param pvMem The mapping.
9046 * @param fAccess The kind of access.
9047 */
9048IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9049{
9050 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9051 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9052
9053 /* If it's bounce buffered, we may need to write back the buffer. */
9054 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9055 {
9056 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9057 {
9058 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9059 if (rcStrict == VINF_SUCCESS)
9060 return;
9061 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9062 }
9063 }
9064 /* Otherwise unlock it. */
9065 else
9066 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9067
9068 /* Free the entry. */
9069 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9070 Assert(pVCpu->iem.s.cActiveMappings != 0);
9071 pVCpu->iem.s.cActiveMappings--;
9072}
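
/*
 * The longjmp flavoured variant of the same pattern (a sketch; iSegReg and
 * GCPtrMem stand in for the caller's operands). There is no status code
 * plumbing here, errors unwind via longjmp, which is exactly what the *Jmp
 * fetch helpers further down do.
 *
 * @code
 *    uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src),
 *                                                             iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
 *    uint32_t const  u32Ret  = *pu32Src;
 *    iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
 *    return u32Ret;
 * @endcode
 */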
9073
9074#endif
9075
9076#ifndef IN_RING3
9077/**
9078 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9079 * buffer part runs into trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM).
9080 *
9081 * Allows the instruction to be completed and retired, while the IEM user will
9082 * return to ring-3 immediately afterwards and do the postponed writes there.
9083 *
9084 * @returns VBox status code (no strict statuses). Caller must check
9085 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9086 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9087 * @param pvMem The mapping.
9088 * @param fAccess The kind of access.
9089 */
9090IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9091{
9092 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9093 AssertReturn(iMemMap >= 0, iMemMap);
9094
9095 /* If it's bounce buffered, we may need to write back the buffer. */
9096 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9097 {
9098 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9099 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9100 }
9101 /* Otherwise unlock it. */
9102 else
9103 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9104
9105 /* Free the entry. */
9106 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9107 Assert(pVCpu->iem.s.cActiveMappings != 0);
9108 pVCpu->iem.s.cActiveMappings--;
9109 return VINF_SUCCESS;
9110}
9111#endif
9112
9113
9114/**
9115 * Rolls back mappings, releasing page locks and such.
9116 *
9117 * The caller shall only call this after checking cActiveMappings.
9118 *
9120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9121 */
9122IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9123{
9124 Assert(pVCpu->iem.s.cActiveMappings > 0);
9125
9126 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9127 while (iMemMap-- > 0)
9128 {
9129 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9130 if (fAccess != IEM_ACCESS_INVALID)
9131 {
9132 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9133 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9134 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9135 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9136 Assert(pVCpu->iem.s.cActiveMappings > 0);
9137 pVCpu->iem.s.cActiveMappings--;
9138 }
9139 }
9140}
9141
9142
9143/**
9144 * Fetches a data byte.
9145 *
9146 * @returns Strict VBox status code.
9147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9148 * @param pu8Dst Where to return the byte.
9149 * @param iSegReg The index of the segment register to use for
9150 * this access. The base and limits are checked.
9151 * @param GCPtrMem The address of the guest memory.
9152 */
9153IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9154{
9155 /* The lazy approach for now... */
9156 uint8_t const *pu8Src;
9157 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9158 if (rc == VINF_SUCCESS)
9159 {
9160 *pu8Dst = *pu8Src;
9161 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9162 }
9163 return rc;
9164}
9165
9166
9167#ifdef IEM_WITH_SETJMP
9168/**
9169 * Fetches a data byte, longjmp on error.
9170 *
9171 * @returns The byte.
9172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9173 * @param iSegReg The index of the segment register to use for
9174 * this access. The base and limits are checked.
9175 * @param GCPtrMem The address of the guest memory.
9176 */
9177DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9178{
9179 /* The lazy approach for now... */
9180 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9181 uint8_t const bRet = *pu8Src;
9182 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9183 return bRet;
9184}
9185#endif /* IEM_WITH_SETJMP */
9186
9187
9188/**
9189 * Fetches a data word.
9190 *
9191 * @returns Strict VBox status code.
9192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9193 * @param pu16Dst Where to return the word.
9194 * @param iSegReg The index of the segment register to use for
9195 * this access. The base and limits are checked.
9196 * @param GCPtrMem The address of the guest memory.
9197 */
9198IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9199{
9200 /* The lazy approach for now... */
9201 uint16_t const *pu16Src;
9202 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9203 if (rc == VINF_SUCCESS)
9204 {
9205 *pu16Dst = *pu16Src;
9206 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9207 }
9208 return rc;
9209}
9210
9211
9212#ifdef IEM_WITH_SETJMP
9213/**
9214 * Fetches a data word, longjmp on error.
9215 *
9216 * @returns The word
9217 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9218 * @param iSegReg The index of the segment register to use for
9219 * this access. The base and limits are checked.
9220 * @param GCPtrMem The address of the guest memory.
9221 */
9222DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9223{
9224 /* The lazy approach for now... */
9225 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9226 uint16_t const u16Ret = *pu16Src;
9227 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9228 return u16Ret;
9229}
9230#endif
9231
9232
9233/**
9234 * Fetches a data dword.
9235 *
9236 * @returns Strict VBox status code.
9237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9238 * @param pu32Dst Where to return the dword.
9239 * @param iSegReg The index of the segment register to use for
9240 * this access. The base and limits are checked.
9241 * @param GCPtrMem The address of the guest memory.
9242 */
9243IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9244{
9245 /* The lazy approach for now... */
9246 uint32_t const *pu32Src;
9247 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9248 if (rc == VINF_SUCCESS)
9249 {
9250 *pu32Dst = *pu32Src;
9251 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9252 }
9253 return rc;
9254}
9255
9256
9257#ifdef IEM_WITH_SETJMP
9258
9259IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9260{
9261 Assert(cbMem >= 1);
9262 Assert(iSegReg < X86_SREG_COUNT);
9263
9264 /*
9265 * 64-bit mode is simpler.
9266 */
9267 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9268 {
9269 if (iSegReg >= X86_SREG_FS)
9270 {
9271 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9272 GCPtrMem += pSel->u64Base;
9273 }
9274
9275 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9276 return GCPtrMem;
9277 }
9278 /*
9279 * 16-bit and 32-bit segmentation.
9280 */
9281 else
9282 {
9283 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9284 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9285 == X86DESCATTR_P /* data, expand up */
9286 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9287 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9288 {
9289 /* expand up */
9290 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9291 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9292 && GCPtrLast32 > (uint32_t)GCPtrMem))
9293 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9294 }
9295 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9296 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9297 {
9298 /* expand down */
9299 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9300 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9301 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9302 && GCPtrLast32 > (uint32_t)GCPtrMem))
9303 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9304 }
9305 else
9306 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9307 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9308 }
9309 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9310}
9311
9312
9313IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9314{
9315 Assert(cbMem >= 1);
9316 Assert(iSegReg < X86_SREG_COUNT);
9317
9318 /*
9319 * 64-bit mode is simpler.
9320 */
9321 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9322 {
9323 if (iSegReg >= X86_SREG_FS)
9324 {
9325 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9326 GCPtrMem += pSel->u64Base;
9327 }
9328
9329 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9330 return GCPtrMem;
9331 }
9332 /*
9333 * 16-bit and 32-bit segmentation.
9334 */
9335 else
9336 {
9337 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9338 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9339 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9340 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9341 {
9342 /* expand up */
9343 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9344 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9345 && GCPtrLast32 > (uint32_t)GCPtrMem))
9346 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9347 }
9348 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9349 {
9350 /* expand down */
9351 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9352 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9353 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9354 && GCPtrLast32 > (uint32_t)GCPtrMem))
9355 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9356 }
9357 else
9358 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9359 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9360 }
9361 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9362}
9363
9364
9365/**
9366 * Fetches a data dword, longjmp on error, fallback/safe version.
9367 *
9368 * @returns The dword
9369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9370 * @param iSegReg The index of the segment register to use for
9371 * this access. The base and limits are checked.
9372 * @param GCPtrMem The address of the guest memory.
9373 */
9374IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9375{
9376 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9377 uint32_t const u32Ret = *pu32Src;
9378 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9379 return u32Ret;
9380}
9381
9382
9383/**
9384 * Fetches a data dword, longjmp on error.
9385 *
9386 * @returns The dword
9387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9388 * @param iSegReg The index of the segment register to use for
9389 * this access. The base and limits are checked.
9390 * @param GCPtrMem The address of the guest memory.
9391 */
9392DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9393{
9394# ifdef IEM_WITH_DATA_TLB
9395 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9396 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9397 {
9398 /// @todo more later.
9399 }
9400
9401 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9402# else
9403 /* The lazy approach. */
9404 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9405 uint32_t const u32Ret = *pu32Src;
9406 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9407 return u32Ret;
9408# endif
9409}
9410#endif
9411
9412
9413#ifdef SOME_UNUSED_FUNCTION
9414/**
9415 * Fetches a data dword and sign extends it to a qword.
9416 *
9417 * @returns Strict VBox status code.
9418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9419 * @param pu64Dst Where to return the sign extended value.
9420 * @param iSegReg The index of the segment register to use for
9421 * this access. The base and limits are checked.
9422 * @param GCPtrMem The address of the guest memory.
9423 */
9424IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9425{
9426 /* The lazy approach for now... */
9427 int32_t const *pi32Src;
9428 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9429 if (rc == VINF_SUCCESS)
9430 {
9431 *pu64Dst = *pi32Src;
9432 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9433 }
9434#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9435 else
9436 *pu64Dst = 0;
9437#endif
9438 return rc;
9439}
9440#endif
9441
9442
9443/**
9444 * Fetches a data qword.
9445 *
9446 * @returns Strict VBox status code.
9447 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9448 * @param pu64Dst Where to return the qword.
9449 * @param iSegReg The index of the segment register to use for
9450 * this access. The base and limits are checked.
9451 * @param GCPtrMem The address of the guest memory.
9452 */
9453IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9454{
9455 /* The lazy approach for now... */
9456 uint64_t const *pu64Src;
9457 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9458 if (rc == VINF_SUCCESS)
9459 {
9460 *pu64Dst = *pu64Src;
9461 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9462 }
9463 return rc;
9464}
9465
9466
9467#ifdef IEM_WITH_SETJMP
9468/**
9469 * Fetches a data qword, longjmp on error.
9470 *
9471 * @returns The qword.
9472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9473 * @param iSegReg The index of the segment register to use for
9474 * this access. The base and limits are checked.
9475 * @param GCPtrMem The address of the guest memory.
9476 */
9477DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9478{
9479 /* The lazy approach for now... */
9480 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9481 uint64_t const u64Ret = *pu64Src;
9482 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9483 return u64Ret;
9484}
9485#endif
9486
9487
9488/**
9489 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9490 *
9491 * @returns Strict VBox status code.
9492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9493 * @param pu64Dst Where to return the qword.
9494 * @param iSegReg The index of the segment register to use for
9495 * this access. The base and limits are checked.
9496 * @param GCPtrMem The address of the guest memory.
9497 */
9498IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9499{
9500 /* The lazy approach for now... */
9501 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9502 if (RT_UNLIKELY(GCPtrMem & 15))
9503 return iemRaiseGeneralProtectionFault0(pVCpu);
9504
9505 uint64_t const *pu64Src;
9506 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9507 if (rc == VINF_SUCCESS)
9508 {
9509 *pu64Dst = *pu64Src;
9510 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9511 }
9512 return rc;
9513}
9514
9515
9516#ifdef IEM_WITH_SETJMP
9517/**
9518 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9519 *
9520 * @returns The qword.
9521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9522 * @param iSegReg The index of the segment register to use for
9523 * this access. The base and limits are checked.
9524 * @param GCPtrMem The address of the guest memory.
9525 */
9526DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9527{
9528 /* The lazy approach for now... */
9529 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9530 if (RT_LIKELY(!(GCPtrMem & 15)))
9531 {
9532 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9533 uint64_t const u64Ret = *pu64Src;
9534 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9535 return u64Ret;
9536 }
9537
9538 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9539 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9540}
9541#endif
9542
9543
9544/**
9545 * Fetches a data tword.
9546 *
9547 * @returns Strict VBox status code.
9548 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9549 * @param pr80Dst Where to return the tword.
9550 * @param iSegReg The index of the segment register to use for
9551 * this access. The base and limits are checked.
9552 * @param GCPtrMem The address of the guest memory.
9553 */
9554IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9555{
9556 /* The lazy approach for now... */
9557 PCRTFLOAT80U pr80Src;
9558 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9559 if (rc == VINF_SUCCESS)
9560 {
9561 *pr80Dst = *pr80Src;
9562 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9563 }
9564 return rc;
9565}
9566
9567
9568#ifdef IEM_WITH_SETJMP
9569/**
9570 * Fetches a data tword, longjmp on error.
9571 *
9572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9573 * @param pr80Dst Where to return the tword.
9574 * @param iSegReg The index of the segment register to use for
9575 * this access. The base and limits are checked.
9576 * @param GCPtrMem The address of the guest memory.
9577 */
9578DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9579{
9580 /* The lazy approach for now... */
9581 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9582 *pr80Dst = *pr80Src;
9583 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9584}
9585#endif
9586
9587
9588/**
9589 * Fetches a data dqword (double qword), generally SSE related.
9590 *
9591 * @returns Strict VBox status code.
9592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9593 * @param pu128Dst Where to return the dqword.
9594 * @param iSegReg The index of the segment register to use for
9595 * this access. The base and limits are checked.
9596 * @param GCPtrMem The address of the guest memory.
9597 */
9598IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9599{
9600 /* The lazy approach for now... */
9601 PCRTUINT128U pu128Src;
9602 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9603 if (rc == VINF_SUCCESS)
9604 {
9605 pu128Dst->au64[0] = pu128Src->au64[0];
9606 pu128Dst->au64[1] = pu128Src->au64[1];
9607 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9608 }
9609 return rc;
9610}
9611
9612
9613#ifdef IEM_WITH_SETJMP
9614/**
9615 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9616 *
9617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9618 * @param pu128Dst Where to return the dqword.
9619 * @param iSegReg The index of the segment register to use for
9620 * this access. The base and limits are checked.
9621 * @param GCPtrMem The address of the guest memory.
9622 */
9623IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9624{
9625 /* The lazy approach for now... */
9626 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9627 pu128Dst->au64[0] = pu128Src->au64[0];
9628 pu128Dst->au64[1] = pu128Src->au64[1];
9629 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9630}
9631#endif
9632
9633
9634/**
9635 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9636 * related.
9637 *
9638 * Raises \#GP(0) if not aligned.
9639 *
9640 * @returns Strict VBox status code.
9641 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9642 * @param pu128Dst Where to return the dqword.
9643 * @param iSegReg The index of the segment register to use for
9644 * this access. The base and limits are checked.
9645 * @param GCPtrMem The address of the guest memory.
9646 */
9647IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9648{
9649 /* The lazy approach for now... */
9650 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9651 if ( (GCPtrMem & 15)
9652 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9653 return iemRaiseGeneralProtectionFault0(pVCpu);
9654
9655 PCRTUINT128U pu128Src;
9656 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9657 if (rc == VINF_SUCCESS)
9658 {
9659 pu128Dst->au64[0] = pu128Src->au64[0];
9660 pu128Dst->au64[1] = pu128Src->au64[1];
9661 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9662 }
9663 return rc;
9664}
9665
9666
9667#ifdef IEM_WITH_SETJMP
9668/**
9669 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9670 * related, longjmp on error.
9671 *
9672 * Raises \#GP(0) if not aligned.
9673 *
9674 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9675 * @param pu128Dst Where to return the dqword.
9676 * @param iSegReg The index of the segment register to use for
9677 * this access. The base and limits are checked.
9678 * @param GCPtrMem The address of the guest memory.
9679 */
9680DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9681{
9682 /* The lazy approach for now... */
9683 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9684 if ( (GCPtrMem & 15) == 0
9685 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9686 {
9687 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9688 pu128Dst->au64[0] = pu128Src->au64[0];
9689 pu128Dst->au64[1] = pu128Src->au64[1];
9690 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9691 return;
9692 }
9693
9694 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9695 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9696}
9697#endif
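
/*
 * Illustrative sketch of the alignment rule the two SSE-aligned fetchers
 * above apply (the helper name is made up): a 16-byte access that is not
 * 16-byte aligned only goes through when the AMD misaligned-SSE mode bit
 * MXCSR.MM is set; otherwise it results in #GP(0).
 */
static bool sketchSseAlignedAccessFaults(uint64_t GCPtrMem, uint32_t fMxcsr)
{
    bool const fMisaligned = (GCPtrMem & 15) != 0;
    bool const fMmSet      = (fMxcsr & X86_MXCSR_MM) != 0;
    return fMisaligned && !fMmSet;   /* true -> the caller raises #GP(0) */
}
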
9698
9699
9700/**
9701 * Fetches a data oword (octo word), generally AVX related.
9702 *
9703 * @returns Strict VBox status code.
9704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9705 * @param pu256Dst Where to return the oword.
9706 * @param iSegReg The index of the segment register to use for
9707 * this access. The base and limits are checked.
9708 * @param GCPtrMem The address of the guest memory.
9709 */
9710IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9711{
9712 /* The lazy approach for now... */
9713 PCRTUINT256U pu256Src;
9714 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9715 if (rc == VINF_SUCCESS)
9716 {
9717 pu256Dst->au64[0] = pu256Src->au64[0];
9718 pu256Dst->au64[1] = pu256Src->au64[1];
9719 pu256Dst->au64[2] = pu256Src->au64[2];
9720 pu256Dst->au64[3] = pu256Src->au64[3];
9721 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9722 }
9723 return rc;
9724}
9725
9726
9727#ifdef IEM_WITH_SETJMP
9728/**
9729 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9730 *
9731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9732 * @param pu256Dst Where to return the oword.
9733 * @param iSegReg The index of the segment register to use for
9734 * this access. The base and limits are checked.
9735 * @param GCPtrMem The address of the guest memory.
9736 */
9737IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9738{
9739 /* The lazy approach for now... */
9740 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9741 pu256Dst->au64[0] = pu256Src->au64[0];
9742 pu256Dst->au64[1] = pu256Src->au64[1];
9743 pu256Dst->au64[2] = pu256Src->au64[2];
9744 pu256Dst->au64[3] = pu256Src->au64[3];
9745 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9746}
9747#endif
9748
9749
9750/**
9751 * Fetches a data oword (octo word) at an aligned address, generally AVX
9752 * related.
9753 *
9754 * Raises \#GP(0) if not aligned.
9755 *
9756 * @returns Strict VBox status code.
9757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9758 * @param pu256Dst Where to return the oword.
9759 * @param iSegReg The index of the segment register to use for
9760 * this access. The base and limits are checked.
9761 * @param GCPtrMem The address of the guest memory.
9762 */
9763IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9764{
9765 /* The lazy approach for now... */
9766 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9767 if (GCPtrMem & 31)
9768 return iemRaiseGeneralProtectionFault0(pVCpu);
9769
9770 PCRTUINT256U pu256Src;
9771 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9772 if (rc == VINF_SUCCESS)
9773 {
9774 pu256Dst->au64[0] = pu256Src->au64[0];
9775 pu256Dst->au64[1] = pu256Src->au64[1];
9776 pu256Dst->au64[2] = pu256Src->au64[2];
9777 pu256Dst->au64[3] = pu256Src->au64[3];
9778 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9779 }
9780 return rc;
9781}
9782
9783
9784#ifdef IEM_WITH_SETJMP
9785/**
9786 * Fetches a data oword (octo word) at an aligned address, generally AVX
9787 * related, longjmp on error.
9788 *
9789 * Raises \#GP(0) if not aligned.
9790 *
9791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9792 * @param pu256Dst Where to return the oword.
9793 * @param iSegReg The index of the segment register to use for
9794 * this access. The base and limits are checked.
9795 * @param GCPtrMem The address of the guest memory.
9796 */
9797DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9798{
9799 /* The lazy approach for now... */
9800 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9801 if ((GCPtrMem & 31) == 0)
9802 {
9803 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9804 pu256Dst->au64[0] = pu256Src->au64[0];
9805 pu256Dst->au64[1] = pu256Src->au64[1];
9806 pu256Dst->au64[2] = pu256Src->au64[2];
9807 pu256Dst->au64[3] = pu256Src->au64[3];
9808 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9809 return;
9810 }
9811
9812 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9813 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9814}
9815#endif
9816
9817
9818
9819/**
9820 * Fetches a descriptor register (lgdt, lidt).
9821 *
9822 * @returns Strict VBox status code.
9823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9824 * @param pcbLimit Where to return the limit.
9825 * @param pGCPtrBase Where to return the base.
9826 * @param iSegReg The index of the segment register to use for
9827 * this access. The base and limits are checked.
9828 * @param GCPtrMem The address of the guest memory.
9829 * @param enmOpSize The effective operand size.
9830 */
9831IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9832 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9833{
9834 /*
9835 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9836 * little special:
9837 * - The two reads are done separately.
9838 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9839 * - We suspect the 386 to actually commit the limit before the base in
9840 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9841 * don't try to emulate this eccentric behavior, because it's not well
9842 * enough understood and rather hard to trigger.
9843 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9844 */
9845 VBOXSTRICTRC rcStrict;
9846 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9847 {
9848 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9849 if (rcStrict == VINF_SUCCESS)
9850 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9851 }
9852 else
9853 {
9854 uint32_t uTmp = 0; /* (silences a Visual C++ 'potentially uninitialized' warning) */
9855 if (enmOpSize == IEMMODE_32BIT)
9856 {
9857 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9858 {
9859 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9860 if (rcStrict == VINF_SUCCESS)
9861 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9862 }
9863 else
9864 {
9865 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9866 if (rcStrict == VINF_SUCCESS)
9867 {
9868 *pcbLimit = (uint16_t)uTmp;
9869 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9870 }
9871 }
9872 if (rcStrict == VINF_SUCCESS)
9873 *pGCPtrBase = uTmp;
9874 }
9875 else
9876 {
9877 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9878 if (rcStrict == VINF_SUCCESS)
9879 {
9880 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9881 if (rcStrict == VINF_SUCCESS)
9882 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9883 }
9884 }
9885 }
9886 return rcStrict;
9887}
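
/*
 * Illustrative sketch of the operand layout the function above reads for
 * LIDT/LGDT outside 64-bit mode (the helper name is made up): a 16-bit limit
 * followed by a 32-bit base, of which only the low 24 bits are kept when the
 * effective operand size is 16-bit.
 */
static void sketchSplitXdtrOperand(uint8_t const *pbOp /* 6 bytes */, bool f32BitOpSize,
                                   uint16_t *pcbLimit, uint32_t *pu32Base)
{
    *pcbLimit = (uint16_t)(pbOp[0] | ((uint16_t)pbOp[1] << 8));
    uint32_t const u32Base = pbOp[2]
                           | ((uint32_t)pbOp[3] << 8)
                           | ((uint32_t)pbOp[4] << 16)
                           | ((uint32_t)pbOp[5] << 24);
    *pu32Base = f32BitOpSize ? u32Base : (u32Base & UINT32_C(0x00ffffff));
}
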
9888
9889
9890
9891/**
9892 * Stores a data byte.
9893 *
9894 * @returns Strict VBox status code.
9895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9896 * @param iSegReg The index of the segment register to use for
9897 * this access. The base and limits are checked.
9898 * @param GCPtrMem The address of the guest memory.
9899 * @param u8Value The value to store.
9900 */
9901IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9902{
9903 /* The lazy approach for now... */
9904 uint8_t *pu8Dst;
9905 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9906 if (rc == VINF_SUCCESS)
9907 {
9908 *pu8Dst = u8Value;
9909 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9910 }
9911 return rc;
9912}
9913
9914
9915#ifdef IEM_WITH_SETJMP
9916/**
9917 * Stores a data byte, longjmp on error.
9918 *
9919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9920 * @param iSegReg The index of the segment register to use for
9921 * this access. The base and limits are checked.
9922 * @param GCPtrMem The address of the guest memory.
9923 * @param u8Value The value to store.
9924 */
9925IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9926{
9927 /* The lazy approach for now... */
9928 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9929 *pu8Dst = u8Value;
9930 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9931}
9932#endif
9933
9934
9935/**
9936 * Stores a data word.
9937 *
9938 * @returns Strict VBox status code.
9939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9940 * @param iSegReg The index of the segment register to use for
9941 * this access. The base and limits are checked.
9942 * @param GCPtrMem The address of the guest memory.
9943 * @param u16Value The value to store.
9944 */
9945IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9946{
9947 /* The lazy approach for now... */
9948 uint16_t *pu16Dst;
9949 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9950 if (rc == VINF_SUCCESS)
9951 {
9952 *pu16Dst = u16Value;
9953 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9954 }
9955 return rc;
9956}
9957
9958
9959#ifdef IEM_WITH_SETJMP
9960/**
9961 * Stores a data word, longjmp on error.
9962 *
9963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9964 * @param iSegReg The index of the segment register to use for
9965 * this access. The base and limits are checked.
9966 * @param GCPtrMem The address of the guest memory.
9967 * @param u16Value The value to store.
9968 */
9969IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9970{
9971 /* The lazy approach for now... */
9972 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9973 *pu16Dst = u16Value;
9974 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9975}
9976#endif
9977
9978
9979/**
9980 * Stores a data dword.
9981 *
9982 * @returns Strict VBox status code.
9983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9984 * @param iSegReg The index of the segment register to use for
9985 * this access. The base and limits are checked.
9986 * @param GCPtrMem The address of the guest memory.
9987 * @param u32Value The value to store.
9988 */
9989IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9990{
9991 /* The lazy approach for now... */
9992 uint32_t *pu32Dst;
9993 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9994 if (rc == VINF_SUCCESS)
9995 {
9996 *pu32Dst = u32Value;
9997 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9998 }
9999 return rc;
10000}
10001
10002
10003#ifdef IEM_WITH_SETJMP
10004/**
10005 * Stores a data dword, longjmp on error.
10006 *
10008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10009 * @param iSegReg The index of the segment register to use for
10010 * this access. The base and limits are checked.
10011 * @param GCPtrMem The address of the guest memory.
10012 * @param u32Value The value to store.
10013 */
10014IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10015{
10016 /* The lazy approach for now... */
10017 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10018 *pu32Dst = u32Value;
10019 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10020}
10021#endif
10022
10023
10024/**
10025 * Stores a data qword.
10026 *
10027 * @returns Strict VBox status code.
10028 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10029 * @param iSegReg The index of the segment register to use for
10030 * this access. The base and limits are checked.
10031 * @param GCPtrMem The address of the guest memory.
10032 * @param u64Value The value to store.
10033 */
10034IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10035{
10036 /* The lazy approach for now... */
10037 uint64_t *pu64Dst;
10038 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10039 if (rc == VINF_SUCCESS)
10040 {
10041 *pu64Dst = u64Value;
10042 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10043 }
10044 return rc;
10045}
10046
10047
10048#ifdef IEM_WITH_SETJMP
10049/**
10050 * Stores a data qword, longjmp on error.
10051 *
10052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10053 * @param iSegReg The index of the segment register to use for
10054 * this access. The base and limits are checked.
10055 * @param GCPtrMem The address of the guest memory.
10056 * @param u64Value The value to store.
10057 */
10058IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10059{
10060 /* The lazy approach for now... */
10061 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10062 *pu64Dst = u64Value;
10063 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10064}
10065#endif
10066
10067
10068/**
10069 * Stores a data dqword.
10070 *
10071 * @returns Strict VBox status code.
10072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10073 * @param iSegReg The index of the segment register to use for
10074 * this access. The base and limits are checked.
10075 * @param GCPtrMem The address of the guest memory.
10076 * @param u128Value The value to store.
10077 */
10078IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10079{
10080 /* The lazy approach for now... */
10081 PRTUINT128U pu128Dst;
10082 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10083 if (rc == VINF_SUCCESS)
10084 {
10085 pu128Dst->au64[0] = u128Value.au64[0];
10086 pu128Dst->au64[1] = u128Value.au64[1];
10087 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10088 }
10089 return rc;
10090}
10091
10092
10093#ifdef IEM_WITH_SETJMP
10094/**
10095 * Stores a data dqword, longjmp on error.
10096 *
10097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10098 * @param iSegReg The index of the segment register to use for
10099 * this access. The base and limits are checked.
10100 * @param GCPtrMem The address of the guest memory.
10101 * @param u128Value The value to store.
10102 */
10103IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10104{
10105 /* The lazy approach for now... */
10106 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10107 pu128Dst->au64[0] = u128Value.au64[0];
10108 pu128Dst->au64[1] = u128Value.au64[1];
10109 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10110}
10111#endif
10112
10113
10114/**
10115 * Stores a data dqword, SSE aligned.
10116 *
10117 * @returns Strict VBox status code.
10118 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10119 * @param iSegReg The index of the segment register to use for
10120 * this access. The base and limits are checked.
10121 * @param GCPtrMem The address of the guest memory.
10122 * @param u128Value The value to store.
10123 */
10124IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10125{
10126 /* The lazy approach for now... */
10127 if ( (GCPtrMem & 15)
10128 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10129 return iemRaiseGeneralProtectionFault0(pVCpu);
10130
10131 PRTUINT128U pu128Dst;
10132 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10133 if (rc == VINF_SUCCESS)
10134 {
10135 pu128Dst->au64[0] = u128Value.au64[0];
10136 pu128Dst->au64[1] = u128Value.au64[1];
10137 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10138 }
10139 return rc;
10140}
10141
10142
10143#ifdef IEM_WITH_SETJMP
10144/**
10145 * Stores a data dqword, SSE aligned, longjmp on error.
10146 *
10148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10149 * @param iSegReg The index of the segment register to use for
10150 * this access. The base and limits are checked.
10151 * @param GCPtrMem The address of the guest memory.
10152 * @param u128Value The value to store.
10153 */
10154DECL_NO_INLINE(IEM_STATIC, void)
10155iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10156{
10157 /* The lazy approach for now... */
10158 if ( (GCPtrMem & 15) == 0
10159 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10160 {
10161 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10162 pu128Dst->au64[0] = u128Value.au64[0];
10163 pu128Dst->au64[1] = u128Value.au64[1];
10164 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10165 return;
10166 }
10167
10168 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10169 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10170}
10171#endif
10172
10173
10174/**
10175 * Stores a data oword (octo word).
10176 *
10177 * @returns Strict VBox status code.
10178 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10179 * @param iSegReg The index of the segment register to use for
10180 * this access. The base and limits are checked.
10181 * @param GCPtrMem The address of the guest memory.
10182 * @param pu256Value Pointer to the value to store.
10183 */
10184IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10185{
10186 /* The lazy approach for now... */
10187 PRTUINT256U pu256Dst;
10188 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10189 if (rc == VINF_SUCCESS)
10190 {
10191 pu256Dst->au64[0] = pu256Value->au64[0];
10192 pu256Dst->au64[1] = pu256Value->au64[1];
10193 pu256Dst->au64[2] = pu256Value->au64[2];
10194 pu256Dst->au64[3] = pu256Value->au64[3];
10195 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10196 }
10197 return rc;
10198}
10199
10200
10201#ifdef IEM_WITH_SETJMP
10202/**
10203 * Stores a data oword (octo word), longjmp on error.
10204 *
10205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10206 * @param iSegReg The index of the segment register to use for
10207 * this access. The base and limits are checked.
10208 * @param GCPtrMem The address of the guest memory.
10209 * @param pu256Value Pointer to the value to store.
10210 */
10211IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10212{
10213 /* The lazy approach for now... */
10214 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10215 pu256Dst->au64[0] = pu256Value->au64[0];
10216 pu256Dst->au64[1] = pu256Value->au64[1];
10217 pu256Dst->au64[2] = pu256Value->au64[2];
10218 pu256Dst->au64[3] = pu256Value->au64[3];
10219 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10220}
10221#endif
10222
10223
10224/**
10225 * Stores a data oword (octo word), AVX aligned.
10226 *
10227 * @returns Strict VBox status code.
10228 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10229 * @param iSegReg The index of the segment register to use for
10230 * this access. The base and limits are checked.
10231 * @param GCPtrMem The address of the guest memory.
10232 * @param pu256Value Pointer to the value to store.
10233 */
10234IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10235{
10236 /* The lazy approach for now... */
10237 if (GCPtrMem & 31)
10238 return iemRaiseGeneralProtectionFault0(pVCpu);
10239
10240 PRTUINT256U pu256Dst;
10241 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10242 if (rc == VINF_SUCCESS)
10243 {
10244 pu256Dst->au64[0] = pu256Value->au64[0];
10245 pu256Dst->au64[1] = pu256Value->au64[1];
10246 pu256Dst->au64[2] = pu256Value->au64[2];
10247 pu256Dst->au64[3] = pu256Value->au64[3];
10248 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10249 }
10250 return rc;
10251}
10252
10253
10254#ifdef IEM_WITH_SETJMP
10255/**
10256 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10257 *
10259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10260 * @param iSegReg The index of the segment register to use for
10261 * this access. The base and limits are checked.
10262 * @param GCPtrMem The address of the guest memory.
10263 * @param pu256Value Pointer to the value to store.
10264 */
10265DECL_NO_INLINE(IEM_STATIC, void)
10266iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10267{
10268 /* The lazy approach for now... */
10269 if ((GCPtrMem & 31) == 0)
10270 {
10271 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10272 pu256Dst->au64[0] = pu256Value->au64[0];
10273 pu256Dst->au64[1] = pu256Value->au64[1];
10274 pu256Dst->au64[2] = pu256Value->au64[2];
10275 pu256Dst->au64[3] = pu256Value->au64[3];
10276 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10277 return;
10278 }
10279
10280 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10281 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10282}
10283#endif
10284
10285
10286/**
10287 * Stores a descriptor register (sgdt, sidt).
10288 *
10289 * @returns Strict VBox status code.
10290 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10291 * @param cbLimit The limit.
10292 * @param GCPtrBase The base address.
10293 * @param iSegReg The index of the segment register to use for
10294 * this access. The base and limits are checked.
10295 * @param GCPtrMem The address of the guest memory.
10296 */
10297IEM_STATIC VBOXSTRICTRC
10298iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10299{
10300 /*
10301 * The SIDT and SGDT instructions actually store the data using two
10302 * independent writes. The instructions do not respond to operand-size prefixes.
10303 */
10304 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10305 if (rcStrict == VINF_SUCCESS)
10306 {
10307 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10308 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10309 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10310 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10311 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10312 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10313 else
10314 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10315 }
10316 return rcStrict;
10317}
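
/*
 * Illustrative sketch of the base dword the function above stores after the
 * 16-bit limit when SGDT/SIDT runs with a 16-bit operand size (the helper
 * name is made up): 286-class CPUs stuff 0xff into the unused top byte,
 * later CPUs simply store the 32-bit base.
 */
static uint32_t sketchSgdtBaseDwordFor16BitOpSize(uint32_t u32Base, bool fIs286OrOlder)
{
    return fIs286OrOlder ? (u32Base | UINT32_C(0xff000000)) : u32Base;
}
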
10318
10319
10320/**
10321 * Pushes a word onto the stack.
10322 *
10323 * @returns Strict VBox status code.
10324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10325 * @param u16Value The value to push.
10326 */
10327IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10328{
10329 /* Increment the stack pointer. */
10330 uint64_t uNewRsp;
10331 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10332 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10333
10334 /* Write the word the lazy way. */
10335 uint16_t *pu16Dst;
10336 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10337 if (rc == VINF_SUCCESS)
10338 {
10339 *pu16Dst = u16Value;
10340 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10341 }
10342
10343 /* Commit the new RSP value unless an access handler made trouble. */
10344 if (rc == VINF_SUCCESS)
10345 pCtx->rsp = uNewRsp;
10346
10347 return rc;
10348}
10349
10350
10351/**
10352 * Pushes a dword onto the stack.
10353 *
10354 * @returns Strict VBox status code.
10355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10356 * @param u32Value The value to push.
10357 */
10358IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10359{
10360 /* Increment the stack pointer. */
10361 uint64_t uNewRsp;
10362 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10363 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10364
10365 /* Write the dword the lazy way. */
10366 uint32_t *pu32Dst;
10367 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10368 if (rc == VINF_SUCCESS)
10369 {
10370 *pu32Dst = u32Value;
10371 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10372 }
10373
10374 /* Commit the new RSP value unless an access handler made trouble. */
10375 if (rc == VINF_SUCCESS)
10376 pCtx->rsp = uNewRsp;
10377
10378 return rc;
10379}
10380
10381
10382/**
10383 * Pushes a dword segment register value onto the stack.
10384 *
10385 * @returns Strict VBox status code.
10386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10387 * @param u32Value The value to push.
10388 */
10389IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10390{
10391 /* Increment the stack pointer. */
10392 uint64_t uNewRsp;
10393 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10394 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10395
10396 VBOXSTRICTRC rc;
10397 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10398 {
10399 /* The recompiler writes a full dword. */
10400 uint32_t *pu32Dst;
10401 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10402 if (rc == VINF_SUCCESS)
10403 {
10404 *pu32Dst = u32Value;
10405 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10406 }
10407 }
10408 else
10409 {
10410 /* The Intel docs talk about zero extending the selector register
10411 value. My actual Intel CPU here might be zero extending the value,
10412 but it still only writes the lower word... */
10413 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10414 * happens when crossing a page boundary: is the high word checked
10415 * for write accessibility or not? Probably it is. What about segment limits?
10416 * It appears this behavior is also shared with trap error codes.
10417 *
10418 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10419 * on ancient hardware to see when it actually did change. */
10420 uint16_t *pu16Dst;
10421 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10422 if (rc == VINF_SUCCESS)
10423 {
10424 *pu16Dst = (uint16_t)u32Value;
10425 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10426 }
10427 }
10428
10429 /* Commit the new RSP value unless an access handler made trouble. */
10430 if (rc == VINF_SUCCESS)
10431 pCtx->rsp = uNewRsp;
10432
10433 return rc;
10434}
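
/*
 * Illustrative sketch of the non-recompiler behavior above (the helper name
 * is made up): the stack slot is 4 bytes wide and RSP moves by 4, but only
 * the low word is overwritten with the selector value; the upper word of the
 * slot keeps whatever happened to be there.
 */
static void sketchWriteSRegIntoDwordStackSlot(uint8_t *pbSlot /* 4 bytes */, uint16_t uSel)
{
    pbSlot[0] = (uint8_t)(uSel & 0xff);   /* low byte of the selector  */
    pbSlot[1] = (uint8_t)(uSel >> 8);     /* high byte of the selector */
    /* pbSlot[2] and pbSlot[3] are deliberately left untouched. */
}
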
10435
10436
10437/**
10438 * Pushes a qword onto the stack.
10439 *
10440 * @returns Strict VBox status code.
10441 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10442 * @param u64Value The value to push.
10443 */
10444IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10445{
10446 /* Increment the stack pointer. */
10447 uint64_t uNewRsp;
10448 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10449 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10450
10451 /* Write the qword the lazy way. */
10452 uint64_t *pu64Dst;
10453 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10454 if (rc == VINF_SUCCESS)
10455 {
10456 *pu64Dst = u64Value;
10457 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10458 }
10459
10460 /* Commit the new RSP value unless an access handler made trouble. */
10461 if (rc == VINF_SUCCESS)
10462 pCtx->rsp = uNewRsp;
10463
10464 return rc;
10465}
10466
10467
10468/**
10469 * Pops a word from the stack.
10470 *
10471 * @returns Strict VBox status code.
10472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10473 * @param pu16Value Where to store the popped value.
10474 */
10475IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10476{
10477 /* Increment the stack pointer. */
10478 uint64_t uNewRsp;
10479 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10480 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10481
10482 /* Fetch the word the lazy way. */
10483 uint16_t const *pu16Src;
10484 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10485 if (rc == VINF_SUCCESS)
10486 {
10487 *pu16Value = *pu16Src;
10488 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10489
10490 /* Commit the new RSP value. */
10491 if (rc == VINF_SUCCESS)
10492 pCtx->rsp = uNewRsp;
10493 }
10494
10495 return rc;
10496}
10497
10498
10499/**
10500 * Pops a dword from the stack.
10501 *
10502 * @returns Strict VBox status code.
10503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10504 * @param pu32Value Where to store the popped value.
10505 */
10506IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10507{
10508 /* Increment the stack pointer. */
10509 uint64_t uNewRsp;
10510 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10511 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10512
10513 /* Fetch the dword the lazy way. */
10514 uint32_t const *pu32Src;
10515 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10516 if (rc == VINF_SUCCESS)
10517 {
10518 *pu32Value = *pu32Src;
10519 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10520
10521 /* Commit the new RSP value. */
10522 if (rc == VINF_SUCCESS)
10523 pCtx->rsp = uNewRsp;
10524 }
10525
10526 return rc;
10527}
10528
10529
10530/**
10531 * Pops a qword from the stack.
10532 *
10533 * @returns Strict VBox status code.
10534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10535 * @param pu64Value Where to store the popped value.
10536 */
10537IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10538{
10539 /* Increment the stack pointer. */
10540 uint64_t uNewRsp;
10541 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10542 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10543
10544 /* Fetch the qword the lazy way. */
10545 uint64_t const *pu64Src;
10546 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10547 if (rc == VINF_SUCCESS)
10548 {
10549 *pu64Value = *pu64Src;
10550 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10551
10552 /* Commit the new RSP value. */
10553 if (rc == VINF_SUCCESS)
10554 pCtx->rsp = uNewRsp;
10555 }
10556
10557 return rc;
10558}
10559
10560
10561/**
10562 * Pushes a word onto the stack, using a temporary stack pointer.
10563 *
10564 * @returns Strict VBox status code.
10565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10566 * @param u16Value The value to push.
10567 * @param pTmpRsp Pointer to the temporary stack pointer.
10568 */
10569IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10570{
10571 /* Increment the stack pointer. */
10572 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10573 RTUINT64U NewRsp = *pTmpRsp;
10574 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10575
10576 /* Write the word the lazy way. */
10577 uint16_t *pu16Dst;
10578 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10579 if (rc == VINF_SUCCESS)
10580 {
10581 *pu16Dst = u16Value;
10582 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10583 }
10584
10585 /* Commit the new RSP value unless an access handler made trouble. */
10586 if (rc == VINF_SUCCESS)
10587 *pTmpRsp = NewRsp;
10588
10589 return rc;
10590}
10591
10592
10593/**
10594 * Pushes a dword onto the stack, using a temporary stack pointer.
10595 *
10596 * @returns Strict VBox status code.
10597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10598 * @param u32Value The value to push.
10599 * @param pTmpRsp Pointer to the temporary stack pointer.
10600 */
10601IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10602{
10603 /* Increment the stack pointer. */
10604 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10605 RTUINT64U NewRsp = *pTmpRsp;
10606 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10607
10608 /* Write the dword the lazy way. */
10609 uint32_t *pu32Dst;
10610 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10611 if (rc == VINF_SUCCESS)
10612 {
10613 *pu32Dst = u32Value;
10614 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10615 }
10616
10617 /* Commit the new RSP value unless an access handler made trouble. */
10618 if (rc == VINF_SUCCESS)
10619 *pTmpRsp = NewRsp;
10620
10621 return rc;
10622}
10623
10624
10625/**
10626 * Pushes a qword onto the stack, using a temporary stack pointer.
10627 *
10628 * @returns Strict VBox status code.
10629 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10630 * @param u64Value The value to push.
10631 * @param pTmpRsp Pointer to the temporary stack pointer.
10632 */
10633IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10634{
10635 /* Increment the stack pointer. */
10636 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10637 RTUINT64U NewRsp = *pTmpRsp;
10638 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10639
10640 /* Write the qword the lazy way. */
10641 uint64_t *pu64Dst;
10642 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10643 if (rc == VINF_SUCCESS)
10644 {
10645 *pu64Dst = u64Value;
10646 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10647 }
10648
10649 /* Commit the new RSP value unless an access handler made trouble. */
10650 if (rc == VINF_SUCCESS)
10651 *pTmpRsp = NewRsp;
10652
10653 return rc;
10654}
10655
10656
10657/**
10658 * Pops a word from the stack, using a temporary stack pointer.
10659 *
10660 * @returns Strict VBox status code.
10661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10662 * @param pu16Value Where to store the popped value.
10663 * @param pTmpRsp Pointer to the temporary stack pointer.
10664 */
10665IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10666{
10667 /* Increment the stack pointer. */
10668 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10669 RTUINT64U NewRsp = *pTmpRsp;
10670 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10671
10672 /* Fetch the word the lazy way. */
10673 uint16_t const *pu16Src;
10674 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10675 if (rc == VINF_SUCCESS)
10676 {
10677 *pu16Value = *pu16Src;
10678 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10679
10680 /* Commit the new RSP value. */
10681 if (rc == VINF_SUCCESS)
10682 *pTmpRsp = NewRsp;
10683 }
10684
10685 return rc;
10686}
10687
10688
10689/**
10690 * Pops a dword from the stack, using a temporary stack pointer.
10691 *
10692 * @returns Strict VBox status code.
10693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10694 * @param pu32Value Where to store the popped value.
10695 * @param pTmpRsp Pointer to the temporary stack pointer.
10696 */
10697IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10698{
10699 /* Increment the stack pointer. */
10700 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10701 RTUINT64U NewRsp = *pTmpRsp;
10702 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10703
10704 /* Fetch the dword the lazy way. */
10705 uint32_t const *pu32Src;
10706 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10707 if (rc == VINF_SUCCESS)
10708 {
10709 *pu32Value = *pu32Src;
10710 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10711
10712 /* Commit the new RSP value. */
10713 if (rc == VINF_SUCCESS)
10714 *pTmpRsp = NewRsp;
10715 }
10716
10717 return rc;
10718}
10719
10720
10721/**
10722 * Pops a qword from the stack, using a temporary stack pointer.
10723 *
10724 * @returns Strict VBox status code.
10725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10726 * @param pu64Value Where to store the popped value.
10727 * @param pTmpRsp Pointer to the temporary stack pointer.
10728 */
10729IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10730{
10731 /* Increment the stack pointer. */
10732 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10733 RTUINT64U NewRsp = *pTmpRsp;
10734 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10735
10736 /* Fetch the qword the lazy way. */
10737 uint64_t const *pu64Src;
10738 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10739 if (rcStrict == VINF_SUCCESS)
10740 {
10741 *pu64Value = *pu64Src;
10742 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10743
10744 /* Commit the new RSP value. */
10745 if (rcStrict == VINF_SUCCESS)
10746 *pTmpRsp = NewRsp;
10747 }
10748
10749 return rcStrict;
10750}
10751
10752
10753/**
10754 * Begin a special stack push (used by interrupts, exceptions and such).
10755 *
10756 * This will raise \#SS or \#PF if appropriate.
10757 *
10758 * @returns Strict VBox status code.
10759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10760 * @param cbMem The number of bytes to push onto the stack.
10761 * @param ppvMem Where to return the pointer to the stack memory.
10762 * As with the other memory functions this could be
10763 * direct access or bounce buffered access, so
10764 * don't commit any register state until the commit
10765 * call succeeds.
10766 * @param puNewRsp Where to return the new RSP value. This must be
10767 * passed unchanged to
10768 * iemMemStackPushCommitSpecial().
10769 */
10770IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10771{
10772 Assert(cbMem < UINT8_MAX);
10773 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10774 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10775 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10776}
10777
10778
10779/**
10780 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10781 *
10782 * This will update the rSP.
10783 *
10784 * @returns Strict VBox status code.
10785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10786 * @param pvMem The pointer returned by
10787 * iemMemStackPushBeginSpecial().
10788 * @param uNewRsp The new RSP value returned by
10789 * iemMemStackPushBeginSpecial().
10790 */
10791IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10792{
10793 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10794 if (rcStrict == VINF_SUCCESS)
10795 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10796 return rcStrict;
10797}
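
/*
 * Illustrative sketch of how iemMemStackPushBeginSpecial and
 * iemMemStackPushCommitSpecial are meant to be paired (the caller below is a
 * made-up example, not an IEM function): map the stack bytes, fill them in,
 * and only then commit both the memory and the new RSP.
 */
static VBOXSTRICTRC sketchPushTwoDwordsSpecial(PVMCPU pVCpu, uint32_t uDword0, uint32_t uDword1)
{
    void        *pvStackMem;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, &pvStackMem, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                                /* #SS, #PF or mapping trouble. */

    ((uint32_t *)pvStackMem)[0] = uDword0;              /* Fill in the mapped stack bytes...  */
    ((uint32_t *)pvStackMem)[1] = uDword1;              /* ...before anything gets committed. */

    return iemMemStackPushCommitSpecial(pVCpu, pvStackMem, uNewRsp); /* Commits memory and RSP. */
}
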
10798
10799
10800/**
10801 * Begin a special stack pop (used by iret, retf and such).
10802 *
10803 * This will raise \#SS or \#PF if appropriate.
10804 *
10805 * @returns Strict VBox status code.
10806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10807 * @param cbMem The number of bytes to pop from the stack.
10808 * @param ppvMem Where to return the pointer to the stack memory.
10809 * @param puNewRsp Where to return the new RSP value. This must be
10810 * assigned to CPUMCTX::rsp manually some time
10811 * after iemMemStackPopDoneSpecial() has been
10812 * called.
10813 */
10814IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10815{
10816 Assert(cbMem < UINT8_MAX);
10817 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10818 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10819 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10820}
10821
10822
10823/**
10824 * Continue a special stack pop (used by iret and retf).
10825 *
10826 * This will raise \#SS or \#PF if appropriate.
10827 *
10828 * @returns Strict VBox status code.
10829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10830 * @param cbMem The number of bytes to pop from the stack.
10831 * @param ppvMem Where to return the pointer to the stack memory.
10832 * @param puNewRsp Where to return the new RSP value. This must be
10833 * assigned to CPUMCTX::rsp manually some time
10834 * after iemMemStackPopDoneSpecial() has been
10835 * called.
10836 */
10837IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10838{
10839 Assert(cbMem < UINT8_MAX);
10840 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10841 RTUINT64U NewRsp;
10842 NewRsp.u = *puNewRsp;
10843 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10844 *puNewRsp = NewRsp.u;
10845 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10846}
10847
10848
10849/**
10850 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10851 * iemMemStackPopContinueSpecial).
10852 *
10853 * The caller will manually commit the rSP.
10854 *
10855 * @returns Strict VBox status code.
10856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10857 * @param pvMem The pointer returned by
10858 * iemMemStackPopBeginSpecial() or
10859 * iemMemStackPopContinueSpecial().
10860 */
10861IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10862{
10863 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10864}
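
/*
 * Illustrative sketch of the matching pop pattern (the caller below is a
 * made-up example, not an IEM function): begin the special pop, copy out the
 * data, call the done routine, and only assign CPUMCTX::rsp once everything
 * that can fail has succeeded.
 */
static VBOXSTRICTRC sketchPopTwoDwordsSpecial(PVMCPU pVCpu, uint32_t *puDword0, uint32_t *puDword1)
{
    void const  *pvStackMem;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, &pvStackMem, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    *puDword0 = ((uint32_t const *)pvStackMem)[0];
    *puDword1 = ((uint32_t const *)pvStackMem)[1];

    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvStackMem);
    if (rcStrict == VINF_SUCCESS)
        IEM_GET_CTX(pVCpu)->rsp = uNewRsp;              /* Manual RSP commit, as documented above. */
    return rcStrict;
}
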
10865
10866
10867/**
10868 * Fetches a system table byte.
10869 *
10870 * @returns Strict VBox status code.
10871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10872 * @param pbDst Where to return the byte.
10873 * @param iSegReg The index of the segment register to use for
10874 * this access. The base and limits are checked.
10875 * @param GCPtrMem The address of the guest memory.
10876 */
10877IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10878{
10879 /* The lazy approach for now... */
10880 uint8_t const *pbSrc;
10881 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10882 if (rc == VINF_SUCCESS)
10883 {
10884 *pbDst = *pbSrc;
10885 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10886 }
10887 return rc;
10888}
10889
10890
10891/**
10892 * Fetches a system table word.
10893 *
10894 * @returns Strict VBox status code.
10895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10896 * @param pu16Dst Where to return the word.
10897 * @param iSegReg The index of the segment register to use for
10898 * this access. The base and limits are checked.
10899 * @param GCPtrMem The address of the guest memory.
10900 */
10901IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10902{
10903 /* The lazy approach for now... */
10904 uint16_t const *pu16Src;
10905 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10906 if (rc == VINF_SUCCESS)
10907 {
10908 *pu16Dst = *pu16Src;
10909 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10910 }
10911 return rc;
10912}
10913
10914
10915/**
10916 * Fetches a system table dword.
10917 *
10918 * @returns Strict VBox status code.
10919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10920 * @param pu32Dst Where to return the dword.
10921 * @param iSegReg The index of the segment register to use for
10922 * this access. The base and limits are checked.
10923 * @param GCPtrMem The address of the guest memory.
10924 */
10925IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10926{
10927 /* The lazy approach for now... */
10928 uint32_t const *pu32Src;
10929 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10930 if (rc == VINF_SUCCESS)
10931 {
10932 *pu32Dst = *pu32Src;
10933 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10934 }
10935 return rc;
10936}
10937
10938
10939/**
10940 * Fetches a system table qword.
10941 *
10942 * @returns Strict VBox status code.
10943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10944 * @param pu64Dst Where to return the qword.
10945 * @param iSegReg The index of the segment register to use for
10946 * this access. The base and limits are checked.
10947 * @param GCPtrMem The address of the guest memory.
10948 */
10949IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10950{
10951 /* The lazy approach for now... */
10952 uint64_t const *pu64Src;
10953 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10954 if (rc == VINF_SUCCESS)
10955 {
10956 *pu64Dst = *pu64Src;
10957 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10958 }
10959 return rc;
10960}
10961
10962
10963/**
10964 * Fetches a descriptor table entry with caller specified error code.
10965 *
10966 * @returns Strict VBox status code.
10967 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10968 * @param pDesc Where to return the descriptor table entry.
10969 * @param uSel The selector which table entry to fetch.
10970 * @param uXcpt The exception to raise on table lookup error.
10971 * @param uErrorCode The error code associated with the exception.
10972 */
10973IEM_STATIC VBOXSTRICTRC
10974iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10975{
10976 AssertPtr(pDesc);
10977 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10978
10979 /** @todo did the 286 require all 8 bytes to be accessible? */
10980 /*
10981 * Get the selector table base and check bounds.
10982 */
10983 RTGCPTR GCPtrBase;
10984 if (uSel & X86_SEL_LDT)
10985 {
10986 if ( !pCtx->ldtr.Attr.n.u1Present
10987 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10988 {
10989 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10990 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10991 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10992 uErrorCode, 0);
10993 }
10994
10995 Assert(pCtx->ldtr.Attr.n.u1Present);
10996 GCPtrBase = pCtx->ldtr.u64Base;
10997 }
10998 else
10999 {
11000 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
11001 {
11002 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
11003 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11004 uErrorCode, 0);
11005 }
11006 GCPtrBase = pCtx->gdtr.pGdt;
11007 }
11008
11009 /*
11010 * Read the legacy descriptor and, if required, the long mode
11011 * extensions.
11012 */
11013 VBOXSTRICTRC rcStrict;
11014 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11015 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11016 else
11017 {
11018 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11019 if (rcStrict == VINF_SUCCESS)
11020 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11021 if (rcStrict == VINF_SUCCESS)
11022 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11023 if (rcStrict == VINF_SUCCESS)
11024 pDesc->Legacy.au16[3] = 0;
11025 else
11026 return rcStrict;
11027 }
11028
11029 if (rcStrict == VINF_SUCCESS)
11030 {
11031 if ( !IEM_IS_LONG_MODE(pVCpu)
11032 || pDesc->Legacy.Gen.u1DescType)
11033 pDesc->Long.au64[1] = 0;
11034 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
11035 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11036 else
11037 {
11038 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11039 /** @todo is this the right exception? */
11040 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11041 }
11042 }
11043 return rcStrict;
11044}
11045
11046
11047/**
11048 * Fetches a descriptor table entry.
11049 *
11050 * @returns Strict VBox status code.
11051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11052 * @param pDesc Where to return the descriptor table entry.
11053 * @param uSel The selector which table entry to fetch.
11054 * @param uXcpt The exception to raise on table lookup error.
11055 */
11056IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11057{
11058 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11059}
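/* A hedged illustration (hypothetical, not taken from a particular
 * instruction): a typical consumer fetches the descriptor, validates it, and
 * then marks it accessed before loading the segment register:
 *
 *     IEMSELDESC   Desc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // ... check Desc.Legacy.Gen.u1Present, the type and DPL vs RPL/CPL ...
 *     if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *     {
 *         rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);  // defined further down
 *         if (rcStrict != VINF_SUCCESS)
 *             return rcStrict;
 *     }
 */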
11060
11061
11062/**
11063 * Fakes a long mode stack selector for SS = 0.
11064 *
11065 * @param pDescSs Where to return the fake stack descriptor.
11066 * @param uDpl The DPL we want.
11067 */
11068IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11069{
11070 pDescSs->Long.au64[0] = 0;
11071 pDescSs->Long.au64[1] = 0;
11072 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11073 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11074 pDescSs->Long.Gen.u2Dpl = uDpl;
11075 pDescSs->Long.Gen.u1Present = 1;
11076 pDescSs->Long.Gen.u1Long = 1;
11077}
11078
11079
11080/**
11081 * Marks the selector descriptor as accessed (only non-system descriptors).
11082 *
11083 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11084 * will therefore skip the limit checks.
11085 *
11086 * @returns Strict VBox status code.
11087 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11088 * @param uSel The selector.
11089 */
11090IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11091{
11092 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11093
11094 /*
11095 * Get the selector table base and calculate the entry address.
11096 */
11097 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11098 ? pCtx->ldtr.u64Base
11099 : pCtx->gdtr.pGdt;
11100 GCPtr += uSel & X86_SEL_MASK;
11101
11102 /*
11103 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11104 * ugly stuff to avoid this. This will make sure it's an atomic access
11105 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11106 */
11107 VBOXSTRICTRC rcStrict;
11108 uint32_t volatile *pu32;
11109 if ((GCPtr & 3) == 0)
11110 {
11111 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11112 GCPtr += 2 + 2;
11113 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11114 if (rcStrict != VINF_SUCCESS)
11115 return rcStrict;
11116 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11117 }
11118 else
11119 {
11120 /* The misaligned GDT/LDT case, map the whole thing. */
11121 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11122 if (rcStrict != VINF_SUCCESS)
11123 return rcStrict;
11124 switch ((uintptr_t)pu32 & 3)
11125 {
11126 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11127 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11128 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11129 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11130 }
11131 }
11132
11133 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11134}
11135
11136/** @} */
11137
11138
11139/*
11140 * Include the C/C++ implementation of the instructions.
11141 */
11142#include "IEMAllCImpl.cpp.h"
11143
11144
11145
11146/** @name "Microcode" macros.
11147 *
11148 * The idea is that we should be able to use the same code to interpret
11149 * instructions as well as to recompile them. Thus this obfuscation.
11150 *
11151 * @{
11152 */
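/* A hedged usage sketch (hypothetical, not copied from the opcode tables): an
 * instruction body built out of these macros, here copying CX into AX, would
 * read roughly like this:
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Tmp);
 *     IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xCX);
 *     IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *     return VINF_SUCCESS;
 */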
11153#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11154#define IEM_MC_END() }
11155#define IEM_MC_PAUSE() do {} while (0)
11156#define IEM_MC_CONTINUE() do {} while (0)
11157
11158/** Internal macro. */
11159#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11160 do \
11161 { \
11162 VBOXSTRICTRC rcStrict2 = a_Expr; \
11163 if (rcStrict2 != VINF_SUCCESS) \
11164 return rcStrict2; \
11165 } while (0)
11166
11167
11168#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11169#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11170#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11171#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11172#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11173#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11174#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11175#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11176#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11177 do { \
11178 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11179 return iemRaiseDeviceNotAvailable(pVCpu); \
11180 } while (0)
11181#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11182 do { \
11183 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11184 return iemRaiseDeviceNotAvailable(pVCpu); \
11185 } while (0)
11186#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11187 do { \
11188 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11189 return iemRaiseMathFault(pVCpu); \
11190 } while (0)
11191#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11192 do { \
11193 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11194 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11195 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11196 return iemRaiseUndefinedOpcode(pVCpu); \
11197 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11198 return iemRaiseDeviceNotAvailable(pVCpu); \
11199 } while (0)
11200#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11201 do { \
11202 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11203 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11204 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11205 return iemRaiseUndefinedOpcode(pVCpu); \
11206 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11207 return iemRaiseDeviceNotAvailable(pVCpu); \
11208 } while (0)
11209#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11210 do { \
11211 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11212 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11213 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11214 return iemRaiseUndefinedOpcode(pVCpu); \
11215 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11216 return iemRaiseDeviceNotAvailable(pVCpu); \
11217 } while (0)
11218#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11219 do { \
11220 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11221 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11222 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11223 return iemRaiseUndefinedOpcode(pVCpu); \
11224 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11225 return iemRaiseDeviceNotAvailable(pVCpu); \
11226 } while (0)
11227#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11228 do { \
11229 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11230 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11231 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11232 return iemRaiseUndefinedOpcode(pVCpu); \
11233 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11234 return iemRaiseDeviceNotAvailable(pVCpu); \
11235 } while (0)
11236#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11237 do { \
11238 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11239 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11240 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11241 return iemRaiseUndefinedOpcode(pVCpu); \
11242 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11243 return iemRaiseDeviceNotAvailable(pVCpu); \
11244 } while (0)
11245#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11246 do { \
11247 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11248 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11249 return iemRaiseUndefinedOpcode(pVCpu); \
11250 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11251 return iemRaiseDeviceNotAvailable(pVCpu); \
11252 } while (0)
11253#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11254 do { \
11255 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11256 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11257 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11258 return iemRaiseUndefinedOpcode(pVCpu); \
11259 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11260 return iemRaiseDeviceNotAvailable(pVCpu); \
11261 } while (0)
11262#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11263 do { \
11264 if (pVCpu->iem.s.uCpl != 0) \
11265 return iemRaiseGeneralProtectionFault0(pVCpu); \
11266 } while (0)
11267#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11268 do { \
11269 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11270 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11271 } while (0)
11272#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11273 do { \
11274 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11275 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11276 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_FSGSBASE)) \
11277 return iemRaiseUndefinedOpcode(pVCpu); \
11278 } while (0)
11279#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11280 do { \
11281 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11282 return iemRaiseGeneralProtectionFault0(pVCpu); \
11283 } while (0)
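/* A hedged sketch (not from a specific opcode) of how the raise-check macros
 * above are typically used as the preamble of an instruction body, before any
 * guest state is touched:
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *     // ... fetch operands, do the work, advance RIP ...
 *     IEM_MC_END();
 */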
11284
11285
11286#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11287#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11288#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11289#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11290#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11291#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11292#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11293 uint32_t a_Name; \
11294 uint32_t *a_pName = &a_Name
11295#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11296 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
11297
11298#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11299#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11300
11301#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11302#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11303#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11304#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11305#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11306#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11307#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11308#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11309#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11310#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11311#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11312#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11313#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11314#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11315#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11316#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11317#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11318#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11319#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11320#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11321#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg));
11322#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg));
11323#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11324#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11325#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11326#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11327#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11328#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11329#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11330#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11331#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11332/** @note Not for IOPL or IF testing or modification. */
11333#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11334#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11335#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11336#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11337
11338#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11339#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11340#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11341#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11342#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11343#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11344#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11345#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11346#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11347#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11348#define IEM_MC_STORE_SREG_BASE_U64(a_iSeg, a_u64Value) *iemSRegBaseRefU64(pVCpu, (a_iSeg)) = (a_u64Value)
11349#define IEM_MC_STORE_SREG_BASE_U32(a_iSeg, a_u32Value) *iemSRegBaseRefU64(pVCpu, (a_iSeg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11350#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11351 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11352
11353
11354#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11355#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11356/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11357 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11358#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11359#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11360/** @note Not for IOPL or IF testing or modification. */
11361#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11362
11363#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11364#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11365#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11366 do { \
11367 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11368 *pu32Reg += (a_u32Value); \
11369 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11370 } while (0)
11371#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11372
11373#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11374#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11375#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11376 do { \
11377 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11378 *pu32Reg -= (a_u32Value); \
11379 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11380 } while (0)
11381#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11382#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11383
11384#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11385#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11386#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11387#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11388#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11389#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11390#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11391
11392#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11393#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11394#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11395#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11396
11397#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11398#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11399#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11400
11401#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11402#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11403#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11404
11405#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11406#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11407#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11408
11409#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11410#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11411#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11412
11413#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11414
11415#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11416
11417#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11418#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11419#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11420 do { \
11421 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11422 *pu32Reg &= (a_u32Value); \
11423 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11424 } while (0)
11425#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11426
11427#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11428#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11429#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11430 do { \
11431 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11432 *pu32Reg |= (a_u32Value); \
11433 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11434 } while (0)
11435#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11436
11437
11438/** @note Not for IOPL or IF modification. */
11439#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11440/** @note Not for IOPL or IF modification. */
11441#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11442/** @note Not for IOPL or IF modification. */
11443#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11444
11445#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11446
11447/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11448#define IEM_MC_FPU_TO_MMX_MODE() do { \
11449 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11450 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11451 } while (0)
11452
11453/** Switches the FPU state from MMX mode (FTW=0xffff). */
11454#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11455 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0; \
11456 } while (0)
11457
11458#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11459 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11460#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11461 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11462#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11463 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11464 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11465 } while (0)
11466#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11467 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11468 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11469 } while (0)
11470#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11471 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11472#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11473 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11474#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11475 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11476
11477#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11478 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11479 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11480 } while (0)
11481#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11482 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11483#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11484 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11485#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11486 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11487#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11488 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11489 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11490 } while (0)
11491#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11492 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11493#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11494 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11495 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11496 } while (0)
11497#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11498 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11499#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11500 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11501 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11502 } while (0)
11503#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11504 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11505#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11506 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11507#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11508 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11509#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11510 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11511#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11512 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11513 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11514 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11515 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11516 } while (0)
11517
11518#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11519 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11520 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11521 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11522 } while (0)
11523#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11524 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11525 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11526 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11527 } while (0)
11528#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11529 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11530 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11531 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11532 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11533 } while (0)
11534#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11535 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11536 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11537 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11538 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11539 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11540 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11541 } while (0)
11542
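/* Note: the _ZX_VLMAX store/copy/merge macros below zero the destination
 * register above the written width up to the maximum supported vector length,
 * mirroring how VEX-encoded writes clear the untouched upper YMM bits (the
 * ZMM clearing is still a placeholder, see IEM_MC_INT_CLEAR_ZMM_256_UP). */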
11543#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11544#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11545 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11546 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11547 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11548 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11549 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11550 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11551 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11552 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11553 } while (0)
11554#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11555 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11556 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11557 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11558 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11559 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11560 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11561 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11562 } while (0)
11563#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11564 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11565 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11566 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11567 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11568 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11569 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11570 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11571 } while (0)
11572#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11573 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11574 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11575 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11576 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11577 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11578 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11579 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11580 } while (0)
11581
11582#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11583 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11584#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11585 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11586#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11587 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11588#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11589 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11590 uintptr_t const iYRegTmp = (a_iYReg); \
11591 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11592 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11593 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11594 } while (0)
11595
11596#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11597 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11598 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11599 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11600 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11601 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11602 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11603 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11604 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11605 } while (0)
11606#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11607 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11608 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11609 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11610 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11611 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11612 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11613 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11614 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11615 } while (0)
11616#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11617 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11618 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11619 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11620 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11621 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11622 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11623 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11624 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11625 } while (0)
11626
11627#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11628 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11629 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11630 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11631 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11632 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11633 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11634 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11635 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11636 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11637 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11638 } while (0)
11639#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11640 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11641 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11642 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11643 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11644 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11645 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11646 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11647 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11648 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11649 } while (0)
11650#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11651 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11652 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11653 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11654 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11655 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11656 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11657 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11658 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11659 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11660 } while (0)
11661#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11662 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11663 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11664 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11665 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11666 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11667 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11668 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11669 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11670 } while (0)
11671
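/* The memory access macros below come in two flavours: without IEM_WITH_SETJMP
 * they expand to calls returning a strict status code and bail out via
 * IEM_MC_RETURN_ON_FAILURE, while with IEM_WITH_SETJMP they expand to the *Jmp
 * variants which longjmp out on failure instead of returning. */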
11672#ifndef IEM_WITH_SETJMP
11673# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11674 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11675# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11676 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11677# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11678 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11679#else
11680# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11681 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11682# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11683 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11684# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11685 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11686#endif
11687
11688#ifndef IEM_WITH_SETJMP
11689# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11690 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11691# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11692 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11693# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11694 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11695#else
11696# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11697 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11698# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11699 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11700# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11701 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11702#endif
11703
11704#ifndef IEM_WITH_SETJMP
11705# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11706 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11707# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11708 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11709# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11710 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11711#else
11712# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11713 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11714# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11715 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11716# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11717 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11718#endif
11719
11720#ifdef SOME_UNUSED_FUNCTION
11721# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11722 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11723#endif
11724
11725#ifndef IEM_WITH_SETJMP
11726# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11727 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11728# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11729 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11730# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11731 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11732# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11733 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11734#else
11735# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11736 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11737# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11738 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11739# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11740 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11741# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11742 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11743#endif
11744
11745#ifndef IEM_WITH_SETJMP
11746# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11747 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11748# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11749 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11750# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11751 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11752#else
11753# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11754 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11755# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11756 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11757# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11758 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11759#endif
11760
11761#ifndef IEM_WITH_SETJMP
11762# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11763 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11764# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11765 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11766#else
11767# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11768 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11769# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11770 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11771#endif
11772
11773#ifndef IEM_WITH_SETJMP
11774# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11775 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11776# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11777 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11778#else
11779# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11780 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11781# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11782 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11783#endif
11784
11785
11786
11787#ifndef IEM_WITH_SETJMP
11788# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11789 do { \
11790 uint8_t u8Tmp; \
11791 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11792 (a_u16Dst) = u8Tmp; \
11793 } while (0)
11794# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11795 do { \
11796 uint8_t u8Tmp; \
11797 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11798 (a_u32Dst) = u8Tmp; \
11799 } while (0)
11800# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11801 do { \
11802 uint8_t u8Tmp; \
11803 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11804 (a_u64Dst) = u8Tmp; \
11805 } while (0)
11806# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11807 do { \
11808 uint16_t u16Tmp; \
11809 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11810 (a_u32Dst) = u16Tmp; \
11811 } while (0)
11812# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11813 do { \
11814 uint16_t u16Tmp; \
11815 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11816 (a_u64Dst) = u16Tmp; \
11817 } while (0)
11818# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11819 do { \
11820 uint32_t u32Tmp; \
11821 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11822 (a_u64Dst) = u32Tmp; \
11823 } while (0)
11824#else /* IEM_WITH_SETJMP */
11825# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11826 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11827# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11828 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11829# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11830 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11831# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11832 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11833# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11834 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11835# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11836 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11837#endif /* IEM_WITH_SETJMP */
11838
11839#ifndef IEM_WITH_SETJMP
11840# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11841 do { \
11842 uint8_t u8Tmp; \
11843 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11844 (a_u16Dst) = (int8_t)u8Tmp; \
11845 } while (0)
11846# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11847 do { \
11848 uint8_t u8Tmp; \
11849 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11850 (a_u32Dst) = (int8_t)u8Tmp; \
11851 } while (0)
11852# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11853 do { \
11854 uint8_t u8Tmp; \
11855 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11856 (a_u64Dst) = (int8_t)u8Tmp; \
11857 } while (0)
11858# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11859 do { \
11860 uint16_t u16Tmp; \
11861 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11862 (a_u32Dst) = (int16_t)u16Tmp; \
11863 } while (0)
11864# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11865 do { \
11866 uint16_t u16Tmp; \
11867 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11868 (a_u64Dst) = (int16_t)u16Tmp; \
11869 } while (0)
11870# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11871 do { \
11872 uint32_t u32Tmp; \
11873 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11874 (a_u64Dst) = (int32_t)u32Tmp; \
11875 } while (0)
11876#else /* IEM_WITH_SETJMP */
11877# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11878 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11879# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11880 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11881# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11882 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11883# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11884 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11885# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11886 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11887# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11888 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11889#endif /* IEM_WITH_SETJMP */
11890
11891#ifndef IEM_WITH_SETJMP
11892# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11893 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11894# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11895 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11896# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11897 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11898# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11899 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11900#else
11901# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11902 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11903# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11904 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11905# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11906 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11907# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11908 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11909#endif
11910
11911#ifndef IEM_WITH_SETJMP
11912# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11913 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11914# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11915 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11916# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11917 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11918# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11919 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11920#else
11921# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11922 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11923# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11924 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11925# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11926 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11927# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11928 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11929#endif
11930
11931#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11932#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11933#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11934#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11935#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11936#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11937#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11938 do { \
11939 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11940 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11941 } while (0)
11942
11943#ifndef IEM_WITH_SETJMP
11944# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11945 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11946# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11947 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11948#else
11949# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11950 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11951# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11952 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11953#endif
11954
11955#ifndef IEM_WITH_SETJMP
11956# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11957 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11958# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11959 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11960#else
11961# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11962 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11963# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11964 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11965#endif
11966
11967
11968#define IEM_MC_PUSH_U16(a_u16Value) \
11969 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11970#define IEM_MC_PUSH_U32(a_u32Value) \
11971 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11972#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11973 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11974#define IEM_MC_PUSH_U64(a_u64Value) \
11975 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11976
11977#define IEM_MC_POP_U16(a_pu16Value) \
11978 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11979#define IEM_MC_POP_U32(a_pu32Value) \
11980 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11981#define IEM_MC_POP_U64(a_pu64Value) \
11982 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11983
11984/** Maps guest memory for direct or bounce buffered access.
11985 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11986 * @remarks May return.
11987 */
11988#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11989 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11990
11991/** Maps guest memory for direct or bounce buffered access.
11992 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11993 * @remarks May return.
11994 */
11995#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11996 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11997
11998/** Commits the memory and unmaps the guest memory.
11999 * @remarks May return.
12000 */
12001#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
12002 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
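
/**
 * Editor's note: a minimal, hypothetical sketch (not part of the original
 * source) showing how IEM_MC_MEM_MAP and IEM_MC_MEM_COMMIT_AND_UNMAP are
 * intended to bracket a read-modify-write memory operand.  The IEM_MC_BEGIN,
 * IEM_MC_ARG, IEM_MC_LOCAL, IEM_MC_ADVANCE_RIP and IEM_MC_END macros and the
 * IEM_ACCESS_DATA_RW flag are assumed to be defined elsewhere in IEM, and the
 * worker pointer pfnExampleU8 is a made-up name for a routine that modifies
 * the byte in place.
 *
 * @code
 *      IEM_MC_BEGIN(1, 1);
 *      IEM_MC_ARG(uint8_t *,   pu8Dst, 0);
 *      IEM_MC_LOCAL(RTGCPTR,   GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_CALL_VOID_AIMPL_1(pfnExampleU8, pu8Dst);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 *
 * Both IEM_MC_MEM_MAP and IEM_MC_MEM_COMMIT_AND_UNMAP expand to
 * IEM_MC_RETURN_ON_FAILURE, so the enclosing opcode function may return early.
 */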
12003
12004/** Commits the memory and unmaps the guest memory unless the FPU status word
12005 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
12006 * that would cause an FPU store instruction not to store anything.
12007 *
12008 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12009 * store, while \#P will not.
12010 *
12011 * @remarks May in theory return - for now.
12012 */
12013#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12014 do { \
12015 if ( !(a_u16FSW & X86_FSW_ES) \
12016 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12017 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
12018 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12019 } while (0)
12020
12021/** Calculates the effective address from the ModR/M byte. */
12022#ifndef IEM_WITH_SETJMP
12023# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12024 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12025#else
12026# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12027 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12028#endif
12029
12030#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12031#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12032#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12033#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12034#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12035#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12036#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12037
12038/**
12039 * Defers the rest of the instruction emulation to a C implementation routine
12040 * and returns, only taking the standard parameters.
12041 *
12042 * @param a_pfnCImpl The pointer to the C routine.
12043 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12044 */
12045#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12046
12047/**
12048 * Defers the rest of the instruction emulation to a C implementation routine and
12049 * returns, taking one argument in addition to the standard ones.
12050 *
12051 * @param a_pfnCImpl The pointer to the C routine.
12052 * @param a0 The argument.
12053 */
12054#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12055
12056/**
12057 * Defers the rest of the instruction emulation to a C implementation routine
12058 * and returns, taking two arguments in addition to the standard ones.
12059 *
12060 * @param a_pfnCImpl The pointer to the C routine.
12061 * @param a0 The first extra argument.
12062 * @param a1 The second extra argument.
12063 */
12064#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12065
12066/**
12067 * Defers the rest of the instruction emulation to a C implementation routine
12068 * and returns, taking three arguments in addition to the standard ones.
12069 *
12070 * @param a_pfnCImpl The pointer to the C routine.
12071 * @param a0 The first extra argument.
12072 * @param a1 The second extra argument.
12073 * @param a2 The third extra argument.
12074 */
12075#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12076
12077/**
12078 * Defers the rest of the instruction emulation to a C implementation routine
12079 * and returns, taking four arguments in addition to the standard ones.
12080 *
12081 * @param a_pfnCImpl The pointer to the C routine.
12082 * @param a0 The first extra argument.
12083 * @param a1 The second extra argument.
12084 * @param a2 The third extra argument.
12085 * @param a3 The fourth extra argument.
12086 */
12087#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12088
12089/**
12090 * Defers the rest of the instruction emulation to a C implementation routine
12091 * and returns, taking five arguments in addition to the standard ones.
12092 *
12093 * @param a_pfnCImpl The pointer to the C routine.
12094 * @param a0 The first extra argument.
12095 * @param a1 The second extra argument.
12096 * @param a2 The third extra argument.
12097 * @param a3 The fourth extra argument.
12098 * @param a4 The fifth extra argument.
12099 */
12100#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12101
12102/**
12103 * Defers the entire instruction emulation to a C implementation routine and
12104 * returns, only taking the standard parameters.
12105 *
12106 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12107 *
12108 * @param a_pfnCImpl The pointer to the C routine.
12109 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12110 */
12111#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12112
12113/**
12114 * Defers the entire instruction emulation to a C implementation routine and
12115 * returns, taking one argument in addition to the standard ones.
12116 *
12117 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12118 *
12119 * @param a_pfnCImpl The pointer to the C routine.
12120 * @param a0 The argument.
12121 */
12122#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12123
12124/**
12125 * Defers the entire instruction emulation to a C implementation routine and
12126 * returns, taking two arguments in addition to the standard ones.
12127 *
12128 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12129 *
12130 * @param a_pfnCImpl The pointer to the C routine.
12131 * @param a0 The first extra argument.
12132 * @param a1 The second extra argument.
12133 */
12134#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12135
12136/**
12137 * Defers the entire instruction emulation to a C implementation routine and
12138 * returns, taking three arguments in addition to the standard ones.
12139 *
12140 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12141 *
12142 * @param a_pfnCImpl The pointer to the C routine.
12143 * @param a0 The first extra argument.
12144 * @param a1 The second extra argument.
12145 * @param a2 The third extra argument.
12146 */
12147#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
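
/**
 * Editor's note: a hypothetical usage sketch (not from the original source)
 * of the IEM_MC_DEFER_TO_CIMPL_* family: the opcode decoder finishes decoding
 * and hands the whole instruction off to a C worker, without any
 * IEM_MC_BEGIN/IEM_MC_END block.  The names iemOp_example and iemCImpl_example
 * are invented, and FNIEMOP_DEF is assumed to be the usual opcode function
 * definition macro.
 *
 * @code
 *      FNIEMOP_DEF(iemOp_example)
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_example, pVCpu->iem.s.enmEffOpSize);
 *      }
 * @endcode
 */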
12148
12149/**
12150 * Calls a FPU assembly implementation taking one visible argument.
12151 *
12152 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12153 * @param a0 The first extra argument.
12154 */
12155#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12156 do { \
12157 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
12158 } while (0)
12159
12160/**
12161 * Calls a FPU assembly implementation taking two visible arguments.
12162 *
12163 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12164 * @param a0 The first extra argument.
12165 * @param a1 The second extra argument.
12166 */
12167#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12168 do { \
12169 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12170 } while (0)
12171
12172/**
12173 * Calls a FPU assembly implementation taking three visible arguments.
12174 *
12175 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12176 * @param a0 The first extra argument.
12177 * @param a1 The second extra argument.
12178 * @param a2 The third extra argument.
12179 */
12180#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12181 do { \
12182 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12183 } while (0)
12184
12185#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12186 do { \
12187 (a_FpuData).FSW = (a_FSW); \
12188 (a_FpuData).r80Result = *(a_pr80Value); \
12189 } while (0)
12190
12191/** Pushes FPU result onto the stack. */
12192#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12193 iemFpuPushResult(pVCpu, &a_FpuData)
12194/** Pushes FPU result onto the stack and sets the FPUDP. */
12195#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12196 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12197
12198/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
12199#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12200 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12201
12202/** Stores FPU result in a stack register. */
12203#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12204 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12205/** Stores FPU result in a stack register and pops the stack. */
12206#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12207 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12208/** Stores FPU result in a stack register and sets the FPUDP. */
12209#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12210 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12211/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12212 * stack. */
12213#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12214 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
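
/**
 * Editor's note: a hypothetical sketch (not from the original source) of the
 * usual flow for a two-operand FPU arithmetic instruction: prepare the FPU,
 * reference the two stack registers, call the assembly worker and either
 * store the result or record a stack underflow.  IEM_MC_BEGIN/IEM_MC_END,
 * IEM_MC_LOCAL, IEM_MC_ARG, IEM_MC_ARG_LOCAL_REF and IEM_MC_ADVANCE_RIP are
 * assumed to be defined elsewhere, and pfnExampleR80ByR80 is a made-up worker
 * name.
 *
 * @code
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1)
 *          IEM_MC_CALL_FPU_AIMPL_3(pfnExampleR80ByR80, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT_THEN_POP(FpuRes, 1);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(1);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */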
12215
12216/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12217#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12218 iemFpuUpdateOpcodeAndIp(pVCpu)
12219/** Free a stack register (for FFREE and FFREEP). */
12220#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12221 iemFpuStackFree(pVCpu, a_iStReg)
12222/** Increment the FPU stack pointer. */
12223#define IEM_MC_FPU_STACK_INC_TOP() \
12224 iemFpuStackIncTop(pVCpu)
12225/** Decrement the FPU stack pointer. */
12226#define IEM_MC_FPU_STACK_DEC_TOP() \
12227 iemFpuStackDecTop(pVCpu)
12228
12229/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12230#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12231 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12232/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12233#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12234 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12235/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12236#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12237 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12238/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12239#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12240 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12241/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12242 * stack. */
12243#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12244 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12245/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12246#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12247 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12248
12249/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12250#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12251 iemFpuStackUnderflow(pVCpu, a_iStDst)
12252/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12253 * stack. */
12254#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12255 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12256/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12257 * FPUDS. */
12258#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12259 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12260/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12261 * FPUDS. Pops stack. */
12262#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12263 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12264/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12265 * stack twice. */
12266#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12267 iemFpuStackUnderflowThenPopPop(pVCpu)
12268/** Raises a FPU stack underflow exception for an instruction pushing a result
12269 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12270#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12271 iemFpuStackPushUnderflow(pVCpu)
12272/** Raises a FPU stack underflow exception for an instruction pushing a result
12273 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12274#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12275 iemFpuStackPushUnderflowTwo(pVCpu)
12276
12277/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12278 * FPUIP, FPUCS and FOP. */
12279#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12280 iemFpuStackPushOverflow(pVCpu)
12281/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12282 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12283#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12284 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12285/** Prepares for using the FPU state.
12286 * Ensures that we can use the host FPU in the current context (RC+R0).
12287 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12288#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12289/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12290#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12291/** Actualizes the guest FPU state so it can be accessed and modified. */
12292#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12293
12294/** Prepares for using the SSE state.
12295 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12296 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12297#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12298/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12299#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12300/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12301#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12302
12303/** Prepares for using the AVX state.
12304 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12305 * Ensures the guest AVX state in the CPUMCTX is up to date.
12306 * @note This will include the AVX512 state too when support for it is added
12307 * due to the zero-extending behaviour of VEX-encoded instructions. */
12308#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12309/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12310#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12311/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12312#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12313
12314/**
12315 * Calls a MMX assembly implementation taking two visible arguments.
12316 *
12317 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12318 * @param a0 The first extra argument.
12319 * @param a1 The second extra argument.
12320 */
12321#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12322 do { \
12323 IEM_MC_PREPARE_FPU_USAGE(); \
12324 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12325 } while (0)
12326
12327/**
12328 * Calls a MMX assembly implementation taking three visible arguments.
12329 *
12330 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12331 * @param a0 The first extra argument.
12332 * @param a1 The second extra argument.
12333 * @param a2 The third extra argument.
12334 */
12335#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12336 do { \
12337 IEM_MC_PREPARE_FPU_USAGE(); \
12338 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12339 } while (0)
12340
12341
12342/**
12343 * Calls a SSE assembly implementation taking two visible arguments.
12344 *
12345 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12346 * @param a0 The first extra argument.
12347 * @param a1 The second extra argument.
12348 */
12349#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12350 do { \
12351 IEM_MC_PREPARE_SSE_USAGE(); \
12352 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12353 } while (0)
12354
12355/**
12356 * Calls a SSE assembly implementation taking three visible arguments.
12357 *
12358 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12359 * @param a0 The first extra argument.
12360 * @param a1 The second extra argument.
12361 * @param a2 The third extra argument.
12362 */
12363#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12364 do { \
12365 IEM_MC_PREPARE_SSE_USAGE(); \
12366 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12367 } while (0)
12368
12369
12370/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12371 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12372#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12373 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState), 0)
12374
12375/**
12376 * Calls a AVX assembly implementation taking two visible arguments.
12377 *
12378 * There is one implicit zero'th argument, a pointer to the extended state.
12379 *
12380 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12381 * @param a1 The first extra argument.
12382 * @param a2 The second extra argument.
12383 */
12384#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12385 do { \
12386 IEM_MC_PREPARE_AVX_USAGE(); \
12387 a_pfnAImpl(pXState, (a1), (a2)); \
12388 } while (0)
12389
12390/**
12391 * Calls a AVX assembly implementation taking three visible arguments.
12392 *
12393 * There is one implicit zero'th argument, a pointer to the extended state.
12394 *
12395 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12396 * @param a1 The first extra argument.
12397 * @param a2 The second extra argument.
12398 * @param a3 The third extra argument.
12399 */
12400#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12401 do { \
12402 IEM_MC_PREPARE_AVX_USAGE(); \
12403 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12404 } while (0)
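
/**
 * Editor's note: a hypothetical sketch (not from the original source) showing
 * how the implicit-argument macro and the AVX call macros fit together for a
 * register-to-register VEX instruction.  IEM_MC_BEGIN/IEM_MC_END and
 * IEM_MC_ADVANCE_RIP are assumed to be defined elsewhere, pfnExampleYmmByYmm
 * is a made-up worker name, and the register indices 0 and 1 are hard-coded
 * for illustration where a real decoder would derive them from the ModRM byte
 * and VEX.VVVV.  Note that IEM_MC_CALL_AVX_AIMPL_2 already performs
 * IEM_MC_PREPARE_AVX_USAGE itself.
 *
 * @code
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      IEM_MC_ARG_CONST(uint8_t, iYRegDst, 0, 1);
 *      IEM_MC_ARG_CONST(uint8_t, iYRegSrc, 1, 2);
 *      IEM_MC_CALL_AVX_AIMPL_2(pfnExampleYmmByYmm, iYRegDst, iYRegSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */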
12405
12406/** @note Not for IOPL or IF testing. */
12407#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
12408/** @note Not for IOPL or IF testing. */
12409#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
12410/** @note Not for IOPL or IF testing. */
12411#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
12412/** @note Not for IOPL or IF testing. */
12413#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
12414/** @note Not for IOPL or IF testing. */
12415#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12416 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12417 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12418/** @note Not for IOPL or IF testing. */
12419#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12420 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12421 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12422/** @note Not for IOPL or IF testing. */
12423#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12424 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12425 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12426 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12427/** @note Not for IOPL or IF testing. */
12428#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12429 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12430 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12431 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12432#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
12433#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
12434#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
12435/** @note Not for IOPL or IF testing. */
12436#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12437 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12438 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12439/** @note Not for IOPL or IF testing. */
12440#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12441 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12442 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12443/** @note Not for IOPL or IF testing. */
12444#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12445 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12446 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12447/** @note Not for IOPL or IF testing. */
12448#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12449 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12450 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12451/** @note Not for IOPL or IF testing. */
12452#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12453 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12454 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12455/** @note Not for IOPL or IF testing. */
12456#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12457 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12458 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12459#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12460#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12461
12462#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12463 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12464#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12465 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12466#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12467 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12468#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12469 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12470#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12471 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12472#define IEM_MC_IF_FCW_IM() \
12473 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12474
12475#define IEM_MC_ELSE() } else {
12476#define IEM_MC_ENDIF() } do {} while (0)
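
/**
 * Editor's note: a hypothetical sketch (not from the original source) of how
 * the IEM_MC_IF_* / IEM_MC_ELSE / IEM_MC_ENDIF macros express a conditional
 * branch, roughly the shape of a JZ-style handler.  IEM_MC_BEGIN/IEM_MC_END,
 * IEM_MC_REL_JMP_S8 and IEM_MC_ADVANCE_RIP are assumed to be defined
 * elsewhere, and i8Imm stands for a previously fetched signed 8-bit
 * displacement.
 *
 * @code
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      IEM_MC_ELSE()
 *          IEM_MC_ADVANCE_RIP();
 *      IEM_MC_ENDIF();
 *      IEM_MC_END();
 * @endcode
 */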
12477
12478/** @} */
12479
12480
12481/** @name Opcode Debug Helpers.
12482 * @{
12483 */
12484#ifdef VBOX_WITH_STATISTICS
12485# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12486#else
12487# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12488#endif
12489
12490#ifdef DEBUG
12491# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12492 do { \
12493 IEMOP_INC_STATS(a_Stats); \
12494 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
12495 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12496 } while (0)
12497
12498# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12499 do { \
12500 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12501 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12502 (void)RT_CONCAT(OP_,a_Upper); \
12503 (void)(a_fDisHints); \
12504 (void)(a_fIemHints); \
12505 } while (0)
12506
12507# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12508 do { \
12509 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12510 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12511 (void)RT_CONCAT(OP_,a_Upper); \
12512 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12513 (void)(a_fDisHints); \
12514 (void)(a_fIemHints); \
12515 } while (0)
12516
12517# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12518 do { \
12519 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12520 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12521 (void)RT_CONCAT(OP_,a_Upper); \
12522 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12523 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12524 (void)(a_fDisHints); \
12525 (void)(a_fIemHints); \
12526 } while (0)
12527
12528# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12529 do { \
12530 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12531 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12532 (void)RT_CONCAT(OP_,a_Upper); \
12533 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12534 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12535 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12536 (void)(a_fDisHints); \
12537 (void)(a_fIemHints); \
12538 } while (0)
12539
12540# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12541 do { \
12542 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12543 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12544 (void)RT_CONCAT(OP_,a_Upper); \
12545 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12546 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12547 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12548 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12549 (void)(a_fDisHints); \
12550 (void)(a_fIemHints); \
12551 } while (0)
12552
12553#else
12554# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12555
12556# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12557 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12558# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12559 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12560# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12561 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12562# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12563 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12564# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12565 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12566
12567#endif
12568
12569#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12570 IEMOP_MNEMONIC0EX(a_Lower, \
12571 #a_Lower, \
12572 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12573#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12574 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12575 #a_Lower " " #a_Op1, \
12576 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12577#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12578 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12579 #a_Lower " " #a_Op1 "," #a_Op2, \
12580 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12581#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12582 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12583 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12584 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12585#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12586 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12587 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12588 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
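
/**
 * Editor's note: a hypothetical sketch (not from the original source) of how
 * the IEMOP_MNEMONIC* wrappers are used at the top of an opcode function to
 * bump the per-instruction statistics counter and emit the Log4 decode line.
 * FNIEMOP_DEF is assumed to be the usual opcode function definition macro;
 * the upper/lower names EXAMPLE/example are invented (a real use must name an
 * existing OP_* constant), and the RM form, Eb/Gb operand codes and
 * DISOPTYPE_HARMLESS hint are illustrative values only.
 *
 * @code
 *      FNIEMOP_DEF(iemOp_example_Eb_Gb)
 *      {
 *          IEMOP_MNEMONIC2(RM, EXAMPLE, example, Eb, Gb, DISOPTYPE_HARMLESS, 0);
 *          uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *          ... decode bRm and emit the IEM_MC body here ...
 *      }
 * @endcode
 */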
12589
12590/** @} */
12591
12592
12593/** @name Opcode Helpers.
12594 * @{
12595 */
12596
12597#ifdef IN_RING3
12598# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12599 do { \
12600 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12601 else \
12602 { \
12603 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12604 return IEMOP_RAISE_INVALID_OPCODE(); \
12605 } \
12606 } while (0)
12607#else
12608# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12609 do { \
12610 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12611 else return IEMOP_RAISE_INVALID_OPCODE(); \
12612 } while (0)
12613#endif
12614
12615/** The instruction requires a 186 or later. */
12616#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12617# define IEMOP_HLP_MIN_186() do { } while (0)
12618#else
12619# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12620#endif
12621
12622/** The instruction requires a 286 or later. */
12623#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12624# define IEMOP_HLP_MIN_286() do { } while (0)
12625#else
12626# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12627#endif
12628
12629/** The instruction requires a 386 or later. */
12630#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12631# define IEMOP_HLP_MIN_386() do { } while (0)
12632#else
12633# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12634#endif
12635
12636/** The instruction requires a 386 or later if the given expression is true. */
12637#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12638# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12639#else
12640# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12641#endif
12642
12643/** The instruction requires a 486 or later. */
12644#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12645# define IEMOP_HLP_MIN_486() do { } while (0)
12646#else
12647# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12648#endif
12649
12650/** The instruction requires a Pentium (586) or later. */
12651#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12652# define IEMOP_HLP_MIN_586() do { } while (0)
12653#else
12654# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12655#endif
12656
12657/** The instruction requires a PentiumPro (686) or later. */
12658#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12659# define IEMOP_HLP_MIN_686() do { } while (0)
12660#else
12661# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12662#endif
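
/**
 * Editor's note: a hypothetical sketch (not from the original source) of the
 * IEMOP_HLP_MIN_* helpers: when the configured target CPU is new enough the
 * check compiles away, otherwise the decoder raises \#UD for guests profiled
 * as an older CPU.  FNIEMOP_DEF and iemOp_example_386_only are invented names.
 *
 * @code
 *      FNIEMOP_DEF(iemOp_example_386_only)
 *      {
 *          IEMOP_HLP_MIN_386();
 *          IEMOP_HLP_NO_64BIT();
 *          ... rest of the decoder ...
 *      }
 * @endcode
 */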
12663
12664
12665/** The instruction raises an \#UD in real and V8086 mode. */
12666#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12667 do \
12668 { \
12669 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12670 else return IEMOP_RAISE_INVALID_OPCODE(); \
12671 } while (0)
12672
12673/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12674 * 64-bit mode. */
12675#define IEMOP_HLP_NO_64BIT() \
12676 do \
12677 { \
12678 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12679 return IEMOP_RAISE_INVALID_OPCODE(); \
12680 } while (0)
12681
12682/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12683 * 64-bit mode. */
12684#define IEMOP_HLP_ONLY_64BIT() \
12685 do \
12686 { \
12687 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12688 return IEMOP_RAISE_INVALID_OPCODE(); \
12689 } while (0)
12690
12691/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12692#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12693 do \
12694 { \
12695 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12696 iemRecalEffOpSize64Default(pVCpu); \
12697 } while (0)
12698
12699/** The instruction has 64-bit operand size if 64-bit mode. */
12700#define IEMOP_HLP_64BIT_OP_SIZE() \
12701 do \
12702 { \
12703 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12704 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12705 } while (0)
12706
12707/** Only a REX prefix immediately preceding the first opcode byte takes
12708 * effect. This macro helps ensure this, as well as logging bad guest code. */
12709#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12710 do \
12711 { \
12712 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12713 { \
12714 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12715 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12716 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12717 pVCpu->iem.s.uRexB = 0; \
12718 pVCpu->iem.s.uRexIndex = 0; \
12719 pVCpu->iem.s.uRexReg = 0; \
12720 iemRecalEffOpSize(pVCpu); \
12721 } \
12722 } while (0)
12723
12724/**
12725 * Done decoding.
12726 */
12727#define IEMOP_HLP_DONE_DECODING() \
12728 do \
12729 { \
12730 /*nothing for now, maybe later... */ \
12731 } while (0)
12732
12733/**
12734 * Done decoding, raise \#UD exception if lock prefix present.
12735 */
12736#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12737 do \
12738 { \
12739 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12740 { /* likely */ } \
12741 else \
12742 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12743 } while (0)
12744
12745
12746/**
12747 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12748 * repnz or size prefixes are present, or if in real or v8086 mode.
12749 */
12750#define IEMOP_HLP_DONE_VEX_DECODING() \
12751 do \
12752 { \
12753 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12754 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12755 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12756 { /* likely */ } \
12757 else \
12758 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12759 } while (0)
12760
12761/**
12762 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12763 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not 0.
12764 */
12765#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12766 do \
12767 { \
12768 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12769 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12770 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12771 && pVCpu->iem.s.uVexLength == 0)) \
12772 { /* likely */ } \
12773 else \
12774 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12775 } while (0)
12776
12777
12778/**
12779 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12780 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12781 * register 0, or if in real or v8086 mode.
12782 */
12783#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12784 do \
12785 { \
12786 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12787 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12788 && !pVCpu->iem.s.uVex3rdReg \
12789 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12790 { /* likely */ } \
12791 else \
12792 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12793 } while (0)
12794
12795/**
12796 * Done decoding VEX, no V, L=0.
12797 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12798 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12799 */
12800#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12801 do \
12802 { \
12803 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12804 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12805 && pVCpu->iem.s.uVexLength == 0 \
12806 && pVCpu->iem.s.uVex3rdReg == 0 \
12807 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12808 { /* likely */ } \
12809 else \
12810 return IEMOP_RAISE_INVALID_OPCODE(); \
12811 } while (0)
12812
12813#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12814 do \
12815 { \
12816 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12817 { /* likely */ } \
12818 else \
12819 { \
12820 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12821 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12822 } \
12823 } while (0)
12824#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12825 do \
12826 { \
12827 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12828 { /* likely */ } \
12829 else \
12830 { \
12831 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12832 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12833 } \
12834 } while (0)
12835
12836/**
12837 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12838 * are present.
12839 */
12840#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12841 do \
12842 { \
12843 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12844 { /* likely */ } \
12845 else \
12846 return IEMOP_RAISE_INVALID_OPCODE(); \
12847 } while (0)
12848
12849
12850#ifdef VBOX_WITH_NESTED_HWVIRT
12851/** Checks and handles the SVM nested-guest instruction intercept and updates
12852 * NRIP if needed. */
12853# define IEMOP_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12854 do \
12855 { \
12856 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12857 { \
12858 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
12859 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12860 } \
12861 } while (0)
12862
12863/** Checks and handles the SVM nested-guest CR read intercept for the given CR. */
12864# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12865 do \
12866 { \
12867 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12868 { \
12869 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
12870 IEM_RETURN_SVM_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12871 } \
12872 } while (0)
12873
12874#else /* !VBOX_WITH_NESTED_HWVIRT */
12875# define IEMOP_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12876# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12877#endif /* !VBOX_WITH_NESTED_HWVIRT */
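
/**
 * Editor's note: a hypothetical sketch (not from the original source) of how
 * an instruction decoder would use the SVM intercept helper above; RDTSC is
 * picked purely as an illustration, and SVM_CTRL_INTERCEPT_RDTSC /
 * SVM_EXIT_RDTSC are assumed to be the usual SVM constants.  When the
 * nested-guest intercept is set, the helper calls IEM_SVM_UPDATE_NRIP and
 * then IEM_RETURN_SVM_VMEXIT; without VBOX_WITH_NESTED_HWVIRT it expands to
 * nothing.
 *
 * @code
 *      IEMOP_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_RDTSC,
 *                                             SVM_EXIT_RDTSC, 0, 0);
 * @endcode
 */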
12878
12879
12880/**
12881 * Calculates the effective address of a ModR/M memory operand.
12882 *
12883 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12884 *
12885 * @return Strict VBox status code.
12886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12887 * @param bRm The ModRM byte.
12888 * @param cbImm The size of any immediate following the
12889 * effective address opcode bytes. Important for
12890 * RIP relative addressing.
12891 * @param pGCPtrEff Where to return the effective address.
12892 */
12893IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12894{
12895 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12896 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12897# define SET_SS_DEF() \
12898 do \
12899 { \
12900 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12901 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12902 } while (0)
12903
12904 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12905 {
12906/** @todo Check the effective address size crap! */
12907 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12908 {
12909 uint16_t u16EffAddr;
12910
12911 /* Handle the disp16 form with no registers first. */
12912 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12913 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12914 else
12915 {
12916                /* Get the displacement. */
12917 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12918 {
12919 case 0: u16EffAddr = 0; break;
12920 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12921 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12922 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12923 }
12924
12925 /* Add the base and index registers to the disp. */
12926 switch (bRm & X86_MODRM_RM_MASK)
12927 {
12928 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12929 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12930 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12931 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12932 case 4: u16EffAddr += pCtx->si; break;
12933 case 5: u16EffAddr += pCtx->di; break;
12934 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12935 case 7: u16EffAddr += pCtx->bx; break;
12936 }
12937 }
12938
12939 *pGCPtrEff = u16EffAddr;
12940 }
12941 else
12942 {
12943 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12944 uint32_t u32EffAddr;
12945
12946 /* Handle the disp32 form with no registers first. */
12947 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12948 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12949 else
12950 {
12951 /* Get the register (or SIB) value. */
12952 switch ((bRm & X86_MODRM_RM_MASK))
12953 {
12954 case 0: u32EffAddr = pCtx->eax; break;
12955 case 1: u32EffAddr = pCtx->ecx; break;
12956 case 2: u32EffAddr = pCtx->edx; break;
12957 case 3: u32EffAddr = pCtx->ebx; break;
12958 case 4: /* SIB */
12959 {
12960 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12961
12962 /* Get the index and scale it. */
12963 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12964 {
12965 case 0: u32EffAddr = pCtx->eax; break;
12966 case 1: u32EffAddr = pCtx->ecx; break;
12967 case 2: u32EffAddr = pCtx->edx; break;
12968 case 3: u32EffAddr = pCtx->ebx; break;
12969 case 4: u32EffAddr = 0; /*none */ break;
12970 case 5: u32EffAddr = pCtx->ebp; break;
12971 case 6: u32EffAddr = pCtx->esi; break;
12972 case 7: u32EffAddr = pCtx->edi; break;
12973 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12974 }
12975 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12976
12977 /* add base */
12978 switch (bSib & X86_SIB_BASE_MASK)
12979 {
12980 case 0: u32EffAddr += pCtx->eax; break;
12981 case 1: u32EffAddr += pCtx->ecx; break;
12982 case 2: u32EffAddr += pCtx->edx; break;
12983 case 3: u32EffAddr += pCtx->ebx; break;
12984 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12985 case 5:
12986 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12987 {
12988 u32EffAddr += pCtx->ebp;
12989 SET_SS_DEF();
12990 }
12991 else
12992 {
12993 uint32_t u32Disp;
12994 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12995 u32EffAddr += u32Disp;
12996 }
12997 break;
12998 case 6: u32EffAddr += pCtx->esi; break;
12999 case 7: u32EffAddr += pCtx->edi; break;
13000 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13001 }
13002 break;
13003 }
13004 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13005 case 6: u32EffAddr = pCtx->esi; break;
13006 case 7: u32EffAddr = pCtx->edi; break;
13007 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13008 }
13009
13010 /* Get and add the displacement. */
13011 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13012 {
13013 case 0:
13014 break;
13015 case 1:
13016 {
13017 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13018 u32EffAddr += i8Disp;
13019 break;
13020 }
13021 case 2:
13022 {
13023 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13024 u32EffAddr += u32Disp;
13025 break;
13026 }
13027 default:
13028 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13029 }
13030
13031 }
13032 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13033 *pGCPtrEff = u32EffAddr;
13034 else
13035 {
13036 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13037 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13038 }
13039 }
13040 }
13041 else
13042 {
13043 uint64_t u64EffAddr;
13044
13045 /* Handle the rip+disp32 form with no registers first. */
13046 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13047 {
13048 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13049 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13050 }
13051 else
13052 {
13053 /* Get the register (or SIB) value. */
13054 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13055 {
13056 case 0: u64EffAddr = pCtx->rax; break;
13057 case 1: u64EffAddr = pCtx->rcx; break;
13058 case 2: u64EffAddr = pCtx->rdx; break;
13059 case 3: u64EffAddr = pCtx->rbx; break;
13060 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13061 case 6: u64EffAddr = pCtx->rsi; break;
13062 case 7: u64EffAddr = pCtx->rdi; break;
13063 case 8: u64EffAddr = pCtx->r8; break;
13064 case 9: u64EffAddr = pCtx->r9; break;
13065 case 10: u64EffAddr = pCtx->r10; break;
13066 case 11: u64EffAddr = pCtx->r11; break;
13067 case 13: u64EffAddr = pCtx->r13; break;
13068 case 14: u64EffAddr = pCtx->r14; break;
13069 case 15: u64EffAddr = pCtx->r15; break;
13070 /* SIB */
13071 case 4:
13072 case 12:
13073 {
13074 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13075
13076 /* Get the index and scale it. */
13077 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13078 {
13079 case 0: u64EffAddr = pCtx->rax; break;
13080 case 1: u64EffAddr = pCtx->rcx; break;
13081 case 2: u64EffAddr = pCtx->rdx; break;
13082 case 3: u64EffAddr = pCtx->rbx; break;
13083 case 4: u64EffAddr = 0; /*none */ break;
13084 case 5: u64EffAddr = pCtx->rbp; break;
13085 case 6: u64EffAddr = pCtx->rsi; break;
13086 case 7: u64EffAddr = pCtx->rdi; break;
13087 case 8: u64EffAddr = pCtx->r8; break;
13088 case 9: u64EffAddr = pCtx->r9; break;
13089 case 10: u64EffAddr = pCtx->r10; break;
13090 case 11: u64EffAddr = pCtx->r11; break;
13091 case 12: u64EffAddr = pCtx->r12; break;
13092 case 13: u64EffAddr = pCtx->r13; break;
13093 case 14: u64EffAddr = pCtx->r14; break;
13094 case 15: u64EffAddr = pCtx->r15; break;
13095 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13096 }
13097 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13098
13099 /* add base */
13100 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13101 {
13102 case 0: u64EffAddr += pCtx->rax; break;
13103 case 1: u64EffAddr += pCtx->rcx; break;
13104 case 2: u64EffAddr += pCtx->rdx; break;
13105 case 3: u64EffAddr += pCtx->rbx; break;
13106 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13107 case 6: u64EffAddr += pCtx->rsi; break;
13108 case 7: u64EffAddr += pCtx->rdi; break;
13109 case 8: u64EffAddr += pCtx->r8; break;
13110 case 9: u64EffAddr += pCtx->r9; break;
13111 case 10: u64EffAddr += pCtx->r10; break;
13112 case 11: u64EffAddr += pCtx->r11; break;
13113 case 12: u64EffAddr += pCtx->r12; break;
13114 case 14: u64EffAddr += pCtx->r14; break;
13115 case 15: u64EffAddr += pCtx->r15; break;
13116 /* complicated encodings */
13117 case 5:
13118 case 13:
13119 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13120 {
13121 if (!pVCpu->iem.s.uRexB)
13122 {
13123 u64EffAddr += pCtx->rbp;
13124 SET_SS_DEF();
13125 }
13126 else
13127 u64EffAddr += pCtx->r13;
13128 }
13129 else
13130 {
13131 uint32_t u32Disp;
13132 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13133 u64EffAddr += (int32_t)u32Disp;
13134 }
13135 break;
13136 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13137 }
13138 break;
13139 }
13140 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13141 }
13142
13143 /* Get and add the displacement. */
13144 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13145 {
13146 case 0:
13147 break;
13148 case 1:
13149 {
13150 int8_t i8Disp;
13151 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13152 u64EffAddr += i8Disp;
13153 break;
13154 }
13155 case 2:
13156 {
13157 uint32_t u32Disp;
13158 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13159 u64EffAddr += (int32_t)u32Disp;
13160 break;
13161 }
13162 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13163 }
13164
13165 }
13166
13167 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13168 *pGCPtrEff = u64EffAddr;
13169 else
13170 {
13171 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13172 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13173 }
13174 }
13175
13176 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13177 return VINF_SUCCESS;
13178}
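
/*
 * Editor's note (not from the original source): a worked example of the
 * 32-bit SIB path above.  For bRm=0x44 (mod=01, reg=000, rm=100) followed by
 * SIB=0x48 (scale=01, index=001, base=000) and disp8=0x10, the index case
 * loads ecx, the scale shift doubles it, the base case adds eax, and the
 * mod=01 displacement case adds the sign-extended 0x10; the effective address
 * is therefore eax + ecx*2 + 0x10, using the default DS segment since neither
 * EBP nor ESP is involved.
 */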
13179
13180
13181/**
13182 * Calculates the effective address of a ModR/M memory operand.
13183 *
13184 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13185 *
13186 * @return Strict VBox status code.
13187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13188 * @param bRm The ModRM byte.
13189 * @param cbImm The size of any immediate following the
13190 * effective address opcode bytes. Important for
13191 * RIP relative addressing.
13192 * @param pGCPtrEff Where to return the effective address.
13193 * @param offRsp RSP displacement.
13194 */
13195IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13196{
13197    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13198 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13199# define SET_SS_DEF() \
13200 do \
13201 { \
13202 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13203 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13204 } while (0)
13205
13206 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13207 {
13208/** @todo Check the effective address size crap! */
13209 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13210 {
13211 uint16_t u16EffAddr;
13212
13213 /* Handle the disp16 form with no registers first. */
13214 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13215 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13216 else
13217 {
13218                /* Get the displacement. */
13219 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13220 {
13221 case 0: u16EffAddr = 0; break;
13222 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13223 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13224 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13225 }
13226
13227 /* Add the base and index registers to the disp. */
13228 switch (bRm & X86_MODRM_RM_MASK)
13229 {
13230 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13231 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13232 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13233 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13234 case 4: u16EffAddr += pCtx->si; break;
13235 case 5: u16EffAddr += pCtx->di; break;
13236 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13237 case 7: u16EffAddr += pCtx->bx; break;
13238 }
13239 }
13240
13241 *pGCPtrEff = u16EffAddr;
13242 }
13243 else
13244 {
13245 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13246 uint32_t u32EffAddr;
13247
13248 /* Handle the disp32 form with no registers first. */
13249 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13250 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13251 else
13252 {
13253 /* Get the register (or SIB) value. */
13254 switch ((bRm & X86_MODRM_RM_MASK))
13255 {
13256 case 0: u32EffAddr = pCtx->eax; break;
13257 case 1: u32EffAddr = pCtx->ecx; break;
13258 case 2: u32EffAddr = pCtx->edx; break;
13259 case 3: u32EffAddr = pCtx->ebx; break;
13260 case 4: /* SIB */
13261 {
13262 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13263
13264 /* Get the index and scale it. */
13265 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13266 {
13267 case 0: u32EffAddr = pCtx->eax; break;
13268 case 1: u32EffAddr = pCtx->ecx; break;
13269 case 2: u32EffAddr = pCtx->edx; break;
13270 case 3: u32EffAddr = pCtx->ebx; break;
13271 case 4: u32EffAddr = 0; /*none */ break;
13272 case 5: u32EffAddr = pCtx->ebp; break;
13273 case 6: u32EffAddr = pCtx->esi; break;
13274 case 7: u32EffAddr = pCtx->edi; break;
13275 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13276 }
13277 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13278
13279 /* add base */
13280 switch (bSib & X86_SIB_BASE_MASK)
13281 {
13282 case 0: u32EffAddr += pCtx->eax; break;
13283 case 1: u32EffAddr += pCtx->ecx; break;
13284 case 2: u32EffAddr += pCtx->edx; break;
13285 case 3: u32EffAddr += pCtx->ebx; break;
13286 case 4:
13287 u32EffAddr += pCtx->esp + offRsp;
13288 SET_SS_DEF();
13289 break;
13290 case 5:
13291 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13292 {
13293 u32EffAddr += pCtx->ebp;
13294 SET_SS_DEF();
13295 }
13296 else
13297 {
13298 uint32_t u32Disp;
13299 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13300 u32EffAddr += u32Disp;
13301 }
13302 break;
13303 case 6: u32EffAddr += pCtx->esi; break;
13304 case 7: u32EffAddr += pCtx->edi; break;
13305 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13306 }
13307 break;
13308 }
13309 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13310 case 6: u32EffAddr = pCtx->esi; break;
13311 case 7: u32EffAddr = pCtx->edi; break;
13312 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13313 }
13314
13315 /* Get and add the displacement. */
13316 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13317 {
13318 case 0:
13319 break;
13320 case 1:
13321 {
13322 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13323 u32EffAddr += i8Disp;
13324 break;
13325 }
13326 case 2:
13327 {
13328 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13329 u32EffAddr += u32Disp;
13330 break;
13331 }
13332 default:
13333 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13334 }
13335
13336 }
13337 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13338 *pGCPtrEff = u32EffAddr;
13339 else
13340 {
13341 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13342 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13343 }
13344 }
13345 }
13346 else
13347 {
13348 uint64_t u64EffAddr;
13349
13350 /* Handle the rip+disp32 form with no registers first. */
13351 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13352 {
13353 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13354 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13355 }
13356 else
13357 {
13358 /* Get the register (or SIB) value. */
13359 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13360 {
13361 case 0: u64EffAddr = pCtx->rax; break;
13362 case 1: u64EffAddr = pCtx->rcx; break;
13363 case 2: u64EffAddr = pCtx->rdx; break;
13364 case 3: u64EffAddr = pCtx->rbx; break;
13365 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13366 case 6: u64EffAddr = pCtx->rsi; break;
13367 case 7: u64EffAddr = pCtx->rdi; break;
13368 case 8: u64EffAddr = pCtx->r8; break;
13369 case 9: u64EffAddr = pCtx->r9; break;
13370 case 10: u64EffAddr = pCtx->r10; break;
13371 case 11: u64EffAddr = pCtx->r11; break;
13372 case 13: u64EffAddr = pCtx->r13; break;
13373 case 14: u64EffAddr = pCtx->r14; break;
13374 case 15: u64EffAddr = pCtx->r15; break;
13375 /* SIB */
13376 case 4:
13377 case 12:
13378 {
13379 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13380
13381 /* Get the index and scale it. */
13382 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13383 {
13384 case 0: u64EffAddr = pCtx->rax; break;
13385 case 1: u64EffAddr = pCtx->rcx; break;
13386 case 2: u64EffAddr = pCtx->rdx; break;
13387 case 3: u64EffAddr = pCtx->rbx; break;
13388 case 4: u64EffAddr = 0; /*none */ break;
13389 case 5: u64EffAddr = pCtx->rbp; break;
13390 case 6: u64EffAddr = pCtx->rsi; break;
13391 case 7: u64EffAddr = pCtx->rdi; break;
13392 case 8: u64EffAddr = pCtx->r8; break;
13393 case 9: u64EffAddr = pCtx->r9; break;
13394 case 10: u64EffAddr = pCtx->r10; break;
13395 case 11: u64EffAddr = pCtx->r11; break;
13396 case 12: u64EffAddr = pCtx->r12; break;
13397 case 13: u64EffAddr = pCtx->r13; break;
13398 case 14: u64EffAddr = pCtx->r14; break;
13399 case 15: u64EffAddr = pCtx->r15; break;
13400 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13401 }
13402 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13403
13404 /* add base */
13405 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13406 {
13407 case 0: u64EffAddr += pCtx->rax; break;
13408 case 1: u64EffAddr += pCtx->rcx; break;
13409 case 2: u64EffAddr += pCtx->rdx; break;
13410 case 3: u64EffAddr += pCtx->rbx; break;
13411 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
13412 case 6: u64EffAddr += pCtx->rsi; break;
13413 case 7: u64EffAddr += pCtx->rdi; break;
13414 case 8: u64EffAddr += pCtx->r8; break;
13415 case 9: u64EffAddr += pCtx->r9; break;
13416 case 10: u64EffAddr += pCtx->r10; break;
13417 case 11: u64EffAddr += pCtx->r11; break;
13418 case 12: u64EffAddr += pCtx->r12; break;
13419 case 14: u64EffAddr += pCtx->r14; break;
13420 case 15: u64EffAddr += pCtx->r15; break;
13421 /* complicated encodings */
13422 case 5:
13423 case 13:
13424 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13425 {
13426 if (!pVCpu->iem.s.uRexB)
13427 {
13428 u64EffAddr += pCtx->rbp;
13429 SET_SS_DEF();
13430 }
13431 else
13432 u64EffAddr += pCtx->r13;
13433 }
13434 else
13435 {
13436 uint32_t u32Disp;
13437 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13438 u64EffAddr += (int32_t)u32Disp;
13439 }
13440 break;
13441 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13442 }
13443 break;
13444 }
13445 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13446 }
13447
13448 /* Get and add the displacement. */
13449 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13450 {
13451 case 0:
13452 break;
13453 case 1:
13454 {
13455 int8_t i8Disp;
13456 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13457 u64EffAddr += i8Disp;
13458 break;
13459 }
13460 case 2:
13461 {
13462 uint32_t u32Disp;
13463 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13464 u64EffAddr += (int32_t)u32Disp;
13465 break;
13466 }
13467 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13468 }
13469
13470 }
13471
13472 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13473 *pGCPtrEff = u64EffAddr;
13474 else
13475 {
13476 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13477 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13478 }
13479 }
13480
13481 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13482 return VINF_SUCCESS;
13483}
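/*
 * Worked example (illustration only, not used by the code): decoding
 *      8B 44 8B 10         mov eax, dword [ebx+ecx*4+010h]
 * in 32-bit mode with the logic above:
 *      bRm  = 0x44 -> mod=01 (disp8 follows), reg=000, rm=100 (SIB follows)
 *      bSib = 0x8B -> scale=10 (*4), index=001 (ecx), base=011 (ebx)
 *      disp8 = 0x10
 * giving u32EffAddr = (ecx << 2) + ebx + 0x10; with ebx=0x1000 and ecx=0x20
 * that is 0x1090.  The default segment stays DS because the base register is
 * neither xBP nor xSP, so SET_SS_DEF is not invoked on that path.
 */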
13484
13485
13486#ifdef IEM_WITH_SETJMP
13487/**
13488 * Calculates the effective address of a ModR/M memory operand.
13489 *
13490 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13491 *
13492 * May longjmp on internal error.
13493 *
13494 * @return The effective address.
13495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13496 * @param bRm The ModRM byte.
13497 * @param cbImm The size of any immediate following the
13498 * effective address opcode bytes. Important for
13499 * RIP relative addressing.
13500 */
13501IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13502{
13503 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13504 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13505# define SET_SS_DEF() \
13506 do \
13507 { \
13508 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13509 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13510 } while (0)
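    /* Note: SET_SS_DEF is invoked on the xBP/xSP based forms below, where the
       architectural default segment is SS rather than DS; an explicit segment
       prefix (checked via IEM_OP_PRF_SEG_MASK) always takes precedence. */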
13511
13512 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13513 {
13514/** @todo Check the effective address size crap! */
13515 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13516 {
13517 uint16_t u16EffAddr;
13518
13519 /* Handle the disp16 form with no registers first. */
13520 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13521 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13522 else
13523 {
13524                /* Get the displacement. */
13525 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13526 {
13527 case 0: u16EffAddr = 0; break;
13528 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13529 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13530 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13531 }
13532
13533 /* Add the base and index registers to the disp. */
13534 switch (bRm & X86_MODRM_RM_MASK)
13535 {
13536 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13537 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13538 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13539 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13540 case 4: u16EffAddr += pCtx->si; break;
13541 case 5: u16EffAddr += pCtx->di; break;
13542 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13543 case 7: u16EffAddr += pCtx->bx; break;
13544 }
13545 }
13546
13547 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13548 return u16EffAddr;
13549 }
13550
13551 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13552 uint32_t u32EffAddr;
13553
13554 /* Handle the disp32 form with no registers first. */
13555 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13556 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13557 else
13558 {
13559 /* Get the register (or SIB) value. */
13560 switch ((bRm & X86_MODRM_RM_MASK))
13561 {
13562 case 0: u32EffAddr = pCtx->eax; break;
13563 case 1: u32EffAddr = pCtx->ecx; break;
13564 case 2: u32EffAddr = pCtx->edx; break;
13565 case 3: u32EffAddr = pCtx->ebx; break;
13566 case 4: /* SIB */
13567 {
13568 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13569
13570 /* Get the index and scale it. */
13571 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13572 {
13573 case 0: u32EffAddr = pCtx->eax; break;
13574 case 1: u32EffAddr = pCtx->ecx; break;
13575 case 2: u32EffAddr = pCtx->edx; break;
13576 case 3: u32EffAddr = pCtx->ebx; break;
13577 case 4: u32EffAddr = 0; /*none */ break;
13578 case 5: u32EffAddr = pCtx->ebp; break;
13579 case 6: u32EffAddr = pCtx->esi; break;
13580 case 7: u32EffAddr = pCtx->edi; break;
13581 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13582 }
13583 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13584
13585 /* add base */
13586 switch (bSib & X86_SIB_BASE_MASK)
13587 {
13588 case 0: u32EffAddr += pCtx->eax; break;
13589 case 1: u32EffAddr += pCtx->ecx; break;
13590 case 2: u32EffAddr += pCtx->edx; break;
13591 case 3: u32EffAddr += pCtx->ebx; break;
13592 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13593 case 5:
13594 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13595 {
13596 u32EffAddr += pCtx->ebp;
13597 SET_SS_DEF();
13598 }
13599 else
13600 {
13601 uint32_t u32Disp;
13602 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13603 u32EffAddr += u32Disp;
13604 }
13605 break;
13606 case 6: u32EffAddr += pCtx->esi; break;
13607 case 7: u32EffAddr += pCtx->edi; break;
13608 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13609 }
13610 break;
13611 }
13612 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13613 case 6: u32EffAddr = pCtx->esi; break;
13614 case 7: u32EffAddr = pCtx->edi; break;
13615 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13616 }
13617
13618 /* Get and add the displacement. */
13619 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13620 {
13621 case 0:
13622 break;
13623 case 1:
13624 {
13625 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13626 u32EffAddr += i8Disp;
13627 break;
13628 }
13629 case 2:
13630 {
13631 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13632 u32EffAddr += u32Disp;
13633 break;
13634 }
13635 default:
13636 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13637 }
13638 }
13639
13640 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13641 {
13642 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13643 return u32EffAddr;
13644 }
13645 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13646 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13647 return u32EffAddr & UINT16_MAX;
13648 }
13649
13650 uint64_t u64EffAddr;
13651
13652 /* Handle the rip+disp32 form with no registers first. */
13653 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13654 {
13655 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13656 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13657 }
13658 else
13659 {
13660 /* Get the register (or SIB) value. */
13661 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13662 {
13663 case 0: u64EffAddr = pCtx->rax; break;
13664 case 1: u64EffAddr = pCtx->rcx; break;
13665 case 2: u64EffAddr = pCtx->rdx; break;
13666 case 3: u64EffAddr = pCtx->rbx; break;
13667 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13668 case 6: u64EffAddr = pCtx->rsi; break;
13669 case 7: u64EffAddr = pCtx->rdi; break;
13670 case 8: u64EffAddr = pCtx->r8; break;
13671 case 9: u64EffAddr = pCtx->r9; break;
13672 case 10: u64EffAddr = pCtx->r10; break;
13673 case 11: u64EffAddr = pCtx->r11; break;
13674 case 13: u64EffAddr = pCtx->r13; break;
13675 case 14: u64EffAddr = pCtx->r14; break;
13676 case 15: u64EffAddr = pCtx->r15; break;
13677 /* SIB */
13678 case 4:
13679 case 12:
13680 {
13681 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13682
13683 /* Get the index and scale it. */
13684 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13685 {
13686 case 0: u64EffAddr = pCtx->rax; break;
13687 case 1: u64EffAddr = pCtx->rcx; break;
13688 case 2: u64EffAddr = pCtx->rdx; break;
13689 case 3: u64EffAddr = pCtx->rbx; break;
13690 case 4: u64EffAddr = 0; /*none */ break;
13691 case 5: u64EffAddr = pCtx->rbp; break;
13692 case 6: u64EffAddr = pCtx->rsi; break;
13693 case 7: u64EffAddr = pCtx->rdi; break;
13694 case 8: u64EffAddr = pCtx->r8; break;
13695 case 9: u64EffAddr = pCtx->r9; break;
13696 case 10: u64EffAddr = pCtx->r10; break;
13697 case 11: u64EffAddr = pCtx->r11; break;
13698 case 12: u64EffAddr = pCtx->r12; break;
13699 case 13: u64EffAddr = pCtx->r13; break;
13700 case 14: u64EffAddr = pCtx->r14; break;
13701 case 15: u64EffAddr = pCtx->r15; break;
13702 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13703 }
13704 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13705
13706 /* add base */
13707 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13708 {
13709 case 0: u64EffAddr += pCtx->rax; break;
13710 case 1: u64EffAddr += pCtx->rcx; break;
13711 case 2: u64EffAddr += pCtx->rdx; break;
13712 case 3: u64EffAddr += pCtx->rbx; break;
13713 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13714 case 6: u64EffAddr += pCtx->rsi; break;
13715 case 7: u64EffAddr += pCtx->rdi; break;
13716 case 8: u64EffAddr += pCtx->r8; break;
13717 case 9: u64EffAddr += pCtx->r9; break;
13718 case 10: u64EffAddr += pCtx->r10; break;
13719 case 11: u64EffAddr += pCtx->r11; break;
13720 case 12: u64EffAddr += pCtx->r12; break;
13721 case 14: u64EffAddr += pCtx->r14; break;
13722 case 15: u64EffAddr += pCtx->r15; break;
13723 /* complicated encodings */
13724 case 5:
13725 case 13:
13726 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13727 {
13728 if (!pVCpu->iem.s.uRexB)
13729 {
13730 u64EffAddr += pCtx->rbp;
13731 SET_SS_DEF();
13732 }
13733 else
13734 u64EffAddr += pCtx->r13;
13735 }
13736 else
13737 {
13738 uint32_t u32Disp;
13739 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13740 u64EffAddr += (int32_t)u32Disp;
13741 }
13742 break;
13743 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13744 }
13745 break;
13746 }
13747 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13748 }
13749
13750 /* Get and add the displacement. */
13751 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13752 {
13753 case 0:
13754 break;
13755 case 1:
13756 {
13757 int8_t i8Disp;
13758 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13759 u64EffAddr += i8Disp;
13760 break;
13761 }
13762 case 2:
13763 {
13764 uint32_t u32Disp;
13765 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13766 u64EffAddr += (int32_t)u32Disp;
13767 break;
13768 }
13769 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13770 }
13771
13772 }
13773
13774 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13775 {
13776 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13777 return u64EffAddr;
13778 }
13779 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13780 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13781 return u64EffAddr & UINT32_MAX;
13782}
13783#endif /* IEM_WITH_SETJMP */
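/*
 * Both effective address helpers above implement the same ModR/M + SIB decode;
 * they differ only in how failures surface: the plain variant returns a VBox
 * status code and passes the address back through an output parameter, while
 * the IEM_WITH_SETJMP variant reports internal errors by longjmp'ing on
 * pVCpu->iem.s.CTX_SUFF(pJmpBuf) and returns the address directly.
 */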
13784
13785
13786/** @} */
13787
13788
13789
13790/*
13791 * Include the instructions
13792 */
13793#include "IEMAllInstructions.cpp.h"
13794
13795
13796
13797
13798#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13799
13800/**
13801 * Sets up execution verification mode.
13802 */
13803IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13804{
13805
13806 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13807
13808 /*
13809 * Always note down the address of the current instruction.
13810 */
13811 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13812 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13813
13814 /*
13815 * Enable verification and/or logging.
13816 */
13817    bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
13818 if ( fNewNoRem
13819 && ( 0
13820#if 0 /* auto enable on first paged protected mode interrupt */
13821 || ( pOrgCtx->eflags.Bits.u1IF
13822 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13823 && TRPMHasTrap(pVCpu)
13824 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13825#endif
13826#if 0
13827           || (   pOrgCtx->cs.Sel == 0x10
13828               && (   pOrgCtx->rip == 0x90119e3e
13829                   || pOrgCtx->rip == 0x901d9810))
13830#endif
13831#if 0 /* Auto enable DSL - FPU stuff. */
13832 || ( pOrgCtx->cs == 0x10
13833 && (// pOrgCtx->rip == 0xc02ec07f
13834 //|| pOrgCtx->rip == 0xc02ec082
13835 //|| pOrgCtx->rip == 0xc02ec0c9
13836 0
13837 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13838#endif
13839#if 0 /* Auto enable DSL - fstp st0 stuff. */
13840           || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13841#endif
13842#if 0
13843 || pOrgCtx->rip == 0x9022bb3a
13844#endif
13845#if 0
13846 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13847#endif
13848#if 0
13849 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13850 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13851#endif
13852#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
13853 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13854 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13855 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13856#endif
13857#if 0 /* NT4SP1 - xadd early boot. */
13858 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13859#endif
13860#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13861 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13862#endif
13863#if 0 /* NT4SP1 - cmpxchg (AMD). */
13864 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13865#endif
13866#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13867 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13868#endif
13869#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13870 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13871
13872#endif
13873#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13874 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13875
13876#endif
13877#if 0 /* NT4SP1 - frstor [ecx] */
13878 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13879#endif
13880#if 0 /* xxxxxx - All long mode code. */
13881 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13882#endif
13883#if 0 /* rep movsq linux 3.7 64-bit boot. */
13884 || (pOrgCtx->rip == 0x0000000000100241)
13885#endif
13886#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13887 || (pOrgCtx->rip == 0x000000000215e240)
13888#endif
13889#if 0 /* DOS's size-overridden iret to v8086. */
13890 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13891#endif
13892 )
13893 )
13894 {
13895 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13896 RTLogFlags(NULL, "enabled");
13897 fNewNoRem = false;
13898 }
13899 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13900 {
13901 pVCpu->iem.s.fNoRem = fNewNoRem;
13902 if (!fNewNoRem)
13903 {
13904 LogAlways(("Enabling verification mode!\n"));
13905 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13906 }
13907 else
13908 LogAlways(("Disabling verification mode!\n"));
13909 }
13910
13911 /*
13912 * Switch state.
13913 */
13914 if (IEM_VERIFICATION_ENABLED(pVCpu))
13915 {
13916 static CPUMCTX s_DebugCtx; /* Ugly! */
13917
13918 s_DebugCtx = *pOrgCtx;
13919 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13920 }
13921
13922 /*
13923 * See if there is an interrupt pending in TRPM and inject it if we can.
13924 */
13925 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13926 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
13927#if defined(VBOX_WITH_NESTED_HWVIRT)
13928 bool fIntrEnabled = pOrgCtx->hwvirt.Gif;
13929 if (fIntrEnabled)
13930 {
13931        if (CPUMIsGuestInSvmNestedHwVirtMode(pOrgCtx))
13932            fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pOrgCtx);
13933 else
13934 fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13935 }
13936#else
13937 bool fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13938#endif
13939 if ( fIntrEnabled
13940 && TRPMHasTrap(pVCpu)
13941 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13942 {
13943 uint8_t u8TrapNo;
13944 TRPMEVENT enmType;
13945 RTGCUINT uErrCode;
13946 RTGCPTR uCr2;
13947 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13948 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13949 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13950 TRPMResetTrap(pVCpu);
13951 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13952 }
13953
13954 /*
13955 * Reset the counters.
13956 */
13957 pVCpu->iem.s.cIOReads = 0;
13958 pVCpu->iem.s.cIOWrites = 0;
13959 pVCpu->iem.s.fIgnoreRaxRdx = false;
13960 pVCpu->iem.s.fOverlappingMovs = false;
13961 pVCpu->iem.s.fProblematicMemory = false;
13962 pVCpu->iem.s.fUndefinedEFlags = 0;
13963
13964 if (IEM_VERIFICATION_ENABLED(pVCpu))
13965 {
13966 /*
13967 * Free all verification records.
13968 */
13969 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13970 pVCpu->iem.s.pIemEvtRecHead = NULL;
13971 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
13972 do
13973 {
13974 while (pEvtRec)
13975 {
13976 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13977 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13978 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13979 pEvtRec = pNext;
13980 }
13981 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13982 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13983 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13984 } while (pEvtRec);
13985 }
13986}
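/*
 * Rough flow of verification mode (as set up above): the guest context is
 * snapshotted into the static s_DebugCtx and IEM executes against that copy,
 * while the original context is later re-run by HM or REM; the two results
 * are then compared in iemExecVerificationModeCheck() below, together with
 * the I/O and RAM event records collected along the way.
 */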
13987
13988
13989/**
13990 * Allocate an event record.
13991 * @returns Pointer to a record, or NULL if verification is disabled or allocation fails.
13992 */
13993IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
13994{
13995 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13996 return NULL;
13997
13998 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
13999 if (pEvtRec)
14000 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
14001 else
14002 {
14003 if (!pVCpu->iem.s.ppIemEvtRecNext)
14004 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
14005
14006 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
14007 if (!pEvtRec)
14008 return NULL;
14009 }
14010 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
14011 pEvtRec->pNext = NULL;
14012 return pEvtRec;
14013}
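/* Note: event records are recycled via the pFreeEvtRec list; when it is empty
   a new record is allocated from the ring-3 MM heap (MM_TAG_EM), and the
   ppIemEvtRecNext==NULL early-out quietly ignores notifications that arrive
   before the lists are initialized (the "fake PCIBIOS" case above). */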
14014
14015
14016/**
14017 * IOMMMIORead notification.
14018 */
14019VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
14020{
14021 PVMCPU pVCpu = VMMGetCpu(pVM);
14022 if (!pVCpu)
14023 return;
14024 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14025 if (!pEvtRec)
14026 return;
14027 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
14028 pEvtRec->u.RamRead.GCPhys = GCPhys;
14029 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
14030 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14031 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14032}
14033
14034
14035/**
14036 * IOMMMIOWrite notification.
14037 */
14038VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
14039{
14040 PVMCPU pVCpu = VMMGetCpu(pVM);
14041 if (!pVCpu)
14042 return;
14043 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14044 if (!pEvtRec)
14045 return;
14046 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
14047 pEvtRec->u.RamWrite.GCPhys = GCPhys;
14048 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
14049 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
14050 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
14051 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
14052 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
14053 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14054 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14055}
14056
14057
14058/**
14059 * IOMIOPortRead notification.
14060 */
14061VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
14062{
14063 PVMCPU pVCpu = VMMGetCpu(pVM);
14064 if (!pVCpu)
14065 return;
14066 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14067 if (!pEvtRec)
14068 return;
14069 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14070 pEvtRec->u.IOPortRead.Port = Port;
14071 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14072 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14073 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14074}
14075
14076/**
14077 * IOMIOPortWrite notification.
14078 */
14079VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14080{
14081 PVMCPU pVCpu = VMMGetCpu(pVM);
14082 if (!pVCpu)
14083 return;
14084 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14085 if (!pEvtRec)
14086 return;
14087 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14088 pEvtRec->u.IOPortWrite.Port = Port;
14089 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14090 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14091 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14092 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14093}
14094
14095
14096VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
14097{
14098 PVMCPU pVCpu = VMMGetCpu(pVM);
14099 if (!pVCpu)
14100 return;
14101 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14102 if (!pEvtRec)
14103 return;
14104 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
14105 pEvtRec->u.IOPortStrRead.Port = Port;
14106 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
14107 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
14108 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14109 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14110}
14111
14112
14113VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
14114{
14115 PVMCPU pVCpu = VMMGetCpu(pVM);
14116 if (!pVCpu)
14117 return;
14118 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14119 if (!pEvtRec)
14120 return;
14121 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
14122 pEvtRec->u.IOPortStrWrite.Port = Port;
14123 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
14124 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
14125 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14126 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14127}
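/*
 * The IEMNotify* hooks above record MMIO and I/O port accesses performed by
 * the other executor (HM or REM, see the IOM notification comments); the
 * records land on the pOtherEvtRecHead list and are matched 1:1 against IEM's
 * own records in iemExecVerificationModeCheck().
 */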
14128
14129
14130/**
14131 * Fakes and records an I/O port read.
14132 *
14133 * @returns VINF_SUCCESS.
14134 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14135 * @param Port The I/O port.
14136 * @param pu32Value Where to store the fake value.
14137 * @param cbValue The size of the access.
14138 */
14139IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14140{
14141 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14142 if (pEvtRec)
14143 {
14144 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14145 pEvtRec->u.IOPortRead.Port = Port;
14146 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14147 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14148 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14149 }
14150 pVCpu->iem.s.cIOReads++;
14151 *pu32Value = 0xcccccccc;
14152 return VINF_SUCCESS;
14153}
14154
14155
14156/**
14157 * Fakes and records an I/O port write.
14158 *
14159 * @returns VINF_SUCCESS.
14160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14161 * @param Port The I/O port.
14162 * @param u32Value The value being written.
14163 * @param cbValue The size of the access.
14164 */
14165IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14166{
14167 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14168 if (pEvtRec)
14169 {
14170 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14171 pEvtRec->u.IOPortWrite.Port = Port;
14172 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14173 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14174 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14175 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14176 }
14177 pVCpu->iem.s.cIOWrites++;
14178 return VINF_SUCCESS;
14179}
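/*
 * The fake I/O helpers above record the access and complete it without
 * touching real devices: reads produce the recognizable filler value
 * 0xcccccccc and bump cIOReads, which the register comparison later uses to
 * relax the RAX/RDX checks (see the cIOReads / fIgnoreRaxRdx logic below).
 */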
14180
14181
14182/**
14183 * Used to add extra details about a stub case.
14184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14185 */
14186IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
14187{
14188 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14189 PVM pVM = pVCpu->CTX_SUFF(pVM);
14190
14191 char szRegs[4096];
14192 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
14193 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
14194 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
14195 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
14196 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
14197 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
14198 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
14199 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
14200 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
14201 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
14202 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
14203 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
14204 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
14205 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
14206 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
14207 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
14208 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
14209 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
14210 " efer=%016VR{efer}\n"
14211 " pat=%016VR{pat}\n"
14212 " sf_mask=%016VR{sf_mask}\n"
14213 "krnl_gs_base=%016VR{krnl_gs_base}\n"
14214 " lstar=%016VR{lstar}\n"
14215 " star=%016VR{star} cstar=%016VR{cstar}\n"
14216 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
14217 );
14218
14219 char szInstr1[256];
14220 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
14221 DBGF_DISAS_FLAGS_DEFAULT_MODE,
14222 szInstr1, sizeof(szInstr1), NULL);
14223 char szInstr2[256];
14224 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
14225 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14226 szInstr2, sizeof(szInstr2), NULL);
14227
14228 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
14229}
14230
14231
14232/**
14233 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
14234 * dump to the assertion info.
14235 *
14236 * @param pEvtRec The record to dump.
14237 */
14238IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
14239{
14240 switch (pEvtRec->enmEvent)
14241 {
14242 case IEMVERIFYEVENT_IOPORT_READ:
14243            RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
14244                            pEvtRec->u.IOPortRead.Port,
14245                            pEvtRec->u.IOPortRead.cbValue);
14246 break;
14247 case IEMVERIFYEVENT_IOPORT_WRITE:
14248 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
14249 pEvtRec->u.IOPortWrite.Port,
14250 pEvtRec->u.IOPortWrite.cbValue,
14251 pEvtRec->u.IOPortWrite.u32Value);
14252 break;
14253 case IEMVERIFYEVENT_IOPORT_STR_READ:
14254            RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
14255                            pEvtRec->u.IOPortStrRead.Port,
14256                            pEvtRec->u.IOPortStrRead.cbValue,
14257                            pEvtRec->u.IOPortStrRead.cTransfers);
14258 break;
14259 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14260 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
14261 pEvtRec->u.IOPortStrWrite.Port,
14262 pEvtRec->u.IOPortStrWrite.cbValue,
14263 pEvtRec->u.IOPortStrWrite.cTransfers);
14264 break;
14265 case IEMVERIFYEVENT_RAM_READ:
14266 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
14267 pEvtRec->u.RamRead.GCPhys,
14268 pEvtRec->u.RamRead.cb);
14269 break;
14270 case IEMVERIFYEVENT_RAM_WRITE:
14271 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
14272 pEvtRec->u.RamWrite.GCPhys,
14273 pEvtRec->u.RamWrite.cb,
14274 (int)pEvtRec->u.RamWrite.cb,
14275 pEvtRec->u.RamWrite.ab);
14276 break;
14277 default:
14278 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
14279 break;
14280 }
14281}
14282
14283
14284/**
14285 * Raises an assertion on the two specified records, showing the given message
14286 * with the record dumps attached.
14287 *
14288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14289 * @param pEvtRec1 The first record.
14290 * @param pEvtRec2 The second record.
14291 * @param pszMsg The message explaining why we're asserting.
14292 */
14293IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
14294{
14295 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14296 iemVerifyAssertAddRecordDump(pEvtRec1);
14297 iemVerifyAssertAddRecordDump(pEvtRec2);
14298 iemVerifyAssertMsg2(pVCpu);
14299 RTAssertPanic();
14300}
14301
14302
14303/**
14304 * Raises an assertion on the specified record, showing the given message with
14305 * a record dump attached.
14306 *
14307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14308 * @param pEvtRec1 The first record.
14309 * @param   pEvtRec     The record to dump.
14310 */
14311IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
14312{
14313 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14314 iemVerifyAssertAddRecordDump(pEvtRec);
14315 iemVerifyAssertMsg2(pVCpu);
14316 RTAssertPanic();
14317}
14318
14319
14320/**
14321 * Verifies a write record.
14322 *
14323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14324 * @param pEvtRec The write record.
14325 * @param   fRem        Set if REM was doing the other execution; if clear,
14326 *                      it was HM.
14327 */
14328IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
14329{
14330 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
14331 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
14332 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
14333 if ( RT_FAILURE(rc)
14334 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
14335 {
14336 /* fend off ins */
14337 if ( !pVCpu->iem.s.cIOReads
14338 || pEvtRec->u.RamWrite.ab[0] != 0xcc
14339 || ( pEvtRec->u.RamWrite.cb != 1
14340 && pEvtRec->u.RamWrite.cb != 2
14341 && pEvtRec->u.RamWrite.cb != 4) )
14342 {
14343 /* fend off ROMs and MMIO */
14344 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
14345 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
14346 {
14347 /* fend off fxsave */
14348 if (pEvtRec->u.RamWrite.cb != 512)
14349 {
14350 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
14351 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14352 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
14353 RTAssertMsg2Add("%s: %.*Rhxs\n"
14354 "iem: %.*Rhxs\n",
14355 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
14356 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
14357 iemVerifyAssertAddRecordDump(pEvtRec);
14358 iemVerifyAssertMsg2(pVCpu);
14359 RTAssertPanic();
14360 }
14361 }
14362 }
14363 }
14364
14365}
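/*
 * The filters above keep the write verification from tripping over known
 * benign differences: INS-style writes of the 0xcc filler produced by the
 * fake port reads, writes into the ROM/MMIO windows at A0000h-FFFFFh and
 * FFFC0000h-FFFFFFFFh, and 512 byte writes (fxsave images).
 */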
14366
14367/**
14368 * Performs the post-execution verification checks.
14369 */
14370IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
14371{
14372 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14373 return rcStrictIem;
14374
14375 /*
14376 * Switch back the state.
14377 */
14378 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
14379 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
14380 Assert(pOrgCtx != pDebugCtx);
14381 IEM_GET_CTX(pVCpu) = pOrgCtx;
14382
14383 /*
14384 * Execute the instruction in REM.
14385 */
14386 bool fRem = false;
14387 PVM pVM = pVCpu->CTX_SUFF(pVM);
14388
14389 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
14390#ifdef IEM_VERIFICATION_MODE_FULL_HM
14391 if ( HMIsEnabled(pVM)
14392 && pVCpu->iem.s.cIOReads == 0
14393 && pVCpu->iem.s.cIOWrites == 0
14394 && !pVCpu->iem.s.fProblematicMemory)
14395 {
14396 uint64_t uStartRip = pOrgCtx->rip;
14397 unsigned iLoops = 0;
14398 do
14399 {
14400 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
14401 iLoops++;
14402 } while ( rc == VINF_SUCCESS
14403 || ( rc == VINF_EM_DBG_STEPPED
14404 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14405 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
14406 || ( pOrgCtx->rip != pDebugCtx->rip
14407 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
14408 && iLoops < 8) );
14409 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
14410 rc = VINF_SUCCESS;
14411 }
14412#endif
14413 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
14414 || rc == VINF_IOM_R3_IOPORT_READ
14415 || rc == VINF_IOM_R3_IOPORT_WRITE
14416 || rc == VINF_IOM_R3_MMIO_READ
14417 || rc == VINF_IOM_R3_MMIO_READ_WRITE
14418 || rc == VINF_IOM_R3_MMIO_WRITE
14419 || rc == VINF_CPUM_R3_MSR_READ
14420 || rc == VINF_CPUM_R3_MSR_WRITE
14421 || rc == VINF_EM_RESCHEDULE
14422 )
14423 {
14424 EMRemLock(pVM);
14425 rc = REMR3EmulateInstruction(pVM, pVCpu);
14426 AssertRC(rc);
14427 EMRemUnlock(pVM);
14428 fRem = true;
14429 }
14430
14431# if 1 /* Skip unimplemented instructions for now. */
14432 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14433 {
14434 IEM_GET_CTX(pVCpu) = pOrgCtx;
14435 if (rc == VINF_EM_DBG_STEPPED)
14436 return VINF_SUCCESS;
14437 return rc;
14438 }
14439# endif
14440
14441 /*
14442 * Compare the register states.
14443 */
14444 unsigned cDiffs = 0;
14445 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
14446 {
14447 //Log(("REM and IEM ends up with different registers!\n"));
14448 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
14449
14450# define CHECK_FIELD(a_Field) \
14451 do \
14452 { \
14453 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14454 { \
14455 switch (sizeof(pOrgCtx->a_Field)) \
14456 { \
14457 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14458 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14459 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14460 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14461 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14462 } \
14463 cDiffs++; \
14464 } \
14465 } while (0)
14466# define CHECK_XSTATE_FIELD(a_Field) \
14467 do \
14468 { \
14469 if (pOrgXState->a_Field != pDebugXState->a_Field) \
14470 { \
14471 switch (sizeof(pOrgXState->a_Field)) \
14472 { \
14473 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14474 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14475 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14476 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14477 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14478 } \
14479 cDiffs++; \
14480 } \
14481 } while (0)
14482
14483# define CHECK_BIT_FIELD(a_Field) \
14484 do \
14485 { \
14486 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14487 { \
14488 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
14489 cDiffs++; \
14490 } \
14491 } while (0)
14492
14493# define CHECK_SEL(a_Sel) \
14494 do \
14495 { \
14496 CHECK_FIELD(a_Sel.Sel); \
14497 CHECK_FIELD(a_Sel.Attr.u); \
14498 CHECK_FIELD(a_Sel.u64Base); \
14499 CHECK_FIELD(a_Sel.u32Limit); \
14500 CHECK_FIELD(a_Sel.fFlags); \
14501 } while (0)
14502
14503 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
14504 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
14505
14506#if 1 /* The recompiler doesn't update these the intel way. */
14507 if (fRem)
14508 {
14509 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
14510 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
14511 pOrgXState->x87.CS = pDebugXState->x87.CS;
14512 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
14513 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
14514 pOrgXState->x87.DS = pDebugXState->x87.DS;
14515 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
14516 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
14517 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
14518 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
14519 }
14520#endif
14521 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
14522 {
14523 RTAssertMsg2Weak(" the FPU state differs\n");
14524 cDiffs++;
14525 CHECK_XSTATE_FIELD(x87.FCW);
14526 CHECK_XSTATE_FIELD(x87.FSW);
14527 CHECK_XSTATE_FIELD(x87.FTW);
14528 CHECK_XSTATE_FIELD(x87.FOP);
14529 CHECK_XSTATE_FIELD(x87.FPUIP);
14530 CHECK_XSTATE_FIELD(x87.CS);
14531 CHECK_XSTATE_FIELD(x87.Rsrvd1);
14532 CHECK_XSTATE_FIELD(x87.FPUDP);
14533 CHECK_XSTATE_FIELD(x87.DS);
14534 CHECK_XSTATE_FIELD(x87.Rsrvd2);
14535 CHECK_XSTATE_FIELD(x87.MXCSR);
14536 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
14537 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
14538 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
14539 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
14540 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
14541 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
14542 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
14543 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
14544 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
14545 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
14546 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
14547 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
14548 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
14549 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
14550 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
14551 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
14552 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
14553 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
14554 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
14555 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
14556 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
14557 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
14558 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
14559 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
14560 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
14561 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
14562 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
14563 }
14564 CHECK_FIELD(rip);
14565 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
14566 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
14567 {
14568 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
14569 CHECK_BIT_FIELD(rflags.Bits.u1CF);
14570 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
14571 CHECK_BIT_FIELD(rflags.Bits.u1PF);
14572 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
14573 CHECK_BIT_FIELD(rflags.Bits.u1AF);
14574 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
14575 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
14576 CHECK_BIT_FIELD(rflags.Bits.u1SF);
14577 CHECK_BIT_FIELD(rflags.Bits.u1TF);
14578 CHECK_BIT_FIELD(rflags.Bits.u1IF);
14579 CHECK_BIT_FIELD(rflags.Bits.u1DF);
14580 CHECK_BIT_FIELD(rflags.Bits.u1OF);
14581 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
14582 CHECK_BIT_FIELD(rflags.Bits.u1NT);
14583 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
14584            if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
14585 CHECK_BIT_FIELD(rflags.Bits.u1RF);
14586 CHECK_BIT_FIELD(rflags.Bits.u1VM);
14587 CHECK_BIT_FIELD(rflags.Bits.u1AC);
14588 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
14589 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
14590 CHECK_BIT_FIELD(rflags.Bits.u1ID);
14591 }
14592
14593 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
14594 CHECK_FIELD(rax);
14595 CHECK_FIELD(rcx);
14596 if (!pVCpu->iem.s.fIgnoreRaxRdx)
14597 CHECK_FIELD(rdx);
14598 CHECK_FIELD(rbx);
14599 CHECK_FIELD(rsp);
14600 CHECK_FIELD(rbp);
14601 CHECK_FIELD(rsi);
14602 CHECK_FIELD(rdi);
14603 CHECK_FIELD(r8);
14604 CHECK_FIELD(r9);
14605 CHECK_FIELD(r10);
14606 CHECK_FIELD(r11);
14607 CHECK_FIELD(r12);
14608 CHECK_FIELD(r13);
14609 CHECK_SEL(cs);
14610 CHECK_SEL(ss);
14611 CHECK_SEL(ds);
14612 CHECK_SEL(es);
14613 CHECK_SEL(fs);
14614 CHECK_SEL(gs);
14615 CHECK_FIELD(cr0);
14616
14617        /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
14618           the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
14619        /* Kludge #2: CR2 differs slightly on cross page boundary faults, we report the last address of the access
14620           while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
14621 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14622 {
14623 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14624 { /* ignore */ }
14625 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14626 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14627 && fRem)
14628 { /* ignore */ }
14629 else
14630 CHECK_FIELD(cr2);
14631 }
14632 CHECK_FIELD(cr3);
14633 CHECK_FIELD(cr4);
14634 CHECK_FIELD(dr[0]);
14635 CHECK_FIELD(dr[1]);
14636 CHECK_FIELD(dr[2]);
14637 CHECK_FIELD(dr[3]);
14638 CHECK_FIELD(dr[6]);
14639 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14640 CHECK_FIELD(dr[7]);
14641 CHECK_FIELD(gdtr.cbGdt);
14642 CHECK_FIELD(gdtr.pGdt);
14643 CHECK_FIELD(idtr.cbIdt);
14644 CHECK_FIELD(idtr.pIdt);
14645 CHECK_SEL(ldtr);
14646 CHECK_SEL(tr);
14647 CHECK_FIELD(SysEnter.cs);
14648 CHECK_FIELD(SysEnter.eip);
14649 CHECK_FIELD(SysEnter.esp);
14650 CHECK_FIELD(msrEFER);
14651 CHECK_FIELD(msrSTAR);
14652 CHECK_FIELD(msrPAT);
14653 CHECK_FIELD(msrLSTAR);
14654 CHECK_FIELD(msrCSTAR);
14655 CHECK_FIELD(msrSFMASK);
14656 CHECK_FIELD(msrKERNELGSBASE);
14657
14658 if (cDiffs != 0)
14659 {
14660 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14661 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14662 RTAssertPanic();
14663 static bool volatile s_fEnterDebugger = true;
14664 if (s_fEnterDebugger)
14665 DBGFSTOP(pVM);
14666
14667# if 1 /* Ignore unimplemented instructions for now. */
14668 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14669 rcStrictIem = VINF_SUCCESS;
14670# endif
14671 }
14672# undef CHECK_FIELD
14673# undef CHECK_BIT_FIELD
14674 }
14675
14676 /*
14677 * If the register state compared fine, check the verification event
14678 * records.
14679 */
14680 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14681 {
14682 /*
14683         * Compare verification event records.
14684 * - I/O port accesses should be a 1:1 match.
14685 */
14686 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14687 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14688 while (pIemRec && pOtherRec)
14689 {
14690            /* Since we might miss RAM writes and reads, skip IEM-only RAM records here:
14691               reads are ignored and extra writes are verified against guest memory. */
14692 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14693 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14694 && pIemRec->pNext)
14695 {
14696 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14697 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14698 pIemRec = pIemRec->pNext;
14699 }
14700
14701 /* Do the compare. */
14702 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14703 {
14704 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14705 break;
14706 }
14707 bool fEquals;
14708 switch (pIemRec->enmEvent)
14709 {
14710 case IEMVERIFYEVENT_IOPORT_READ:
14711 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14712 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14713 break;
14714 case IEMVERIFYEVENT_IOPORT_WRITE:
14715 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14716 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14717 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14718 break;
14719 case IEMVERIFYEVENT_IOPORT_STR_READ:
14720 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14721 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14722 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14723 break;
14724 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14725 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14726 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14727 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14728 break;
14729 case IEMVERIFYEVENT_RAM_READ:
14730 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14731 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14732 break;
14733 case IEMVERIFYEVENT_RAM_WRITE:
14734 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14735 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14736 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14737 break;
14738 default:
14739 fEquals = false;
14740 break;
14741 }
14742 if (!fEquals)
14743 {
14744 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14745 break;
14746 }
14747
14748 /* advance */
14749 pIemRec = pIemRec->pNext;
14750 pOtherRec = pOtherRec->pNext;
14751 }
14752
14753 /* Ignore extra writes and reads. */
14754 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14755 {
14756 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14757 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14758 pIemRec = pIemRec->pNext;
14759 }
14760 if (pIemRec != NULL)
14761 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14762 else if (pOtherRec != NULL)
14763 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14764 }
14765 IEM_GET_CTX(pVCpu) = pOrgCtx;
14766
14767 return rcStrictIem;
14768}
14769
14770#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14771
14772/* stubs */
14773IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14774{
14775 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14776 return VERR_INTERNAL_ERROR;
14777}
14778
14779IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14780{
14781 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14782 return VERR_INTERNAL_ERROR;
14783}
14784
14785#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14786
14787
14788#ifdef LOG_ENABLED
14789/**
14790 * Logs the current instruction.
14791 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14792 * @param pCtx The current CPU context.
14793 * @param fSameCtx Set if we have the same context information as the VMM,
14794 * clear if we may have already executed an instruction in
14795 * our debug context. When clear, we assume IEMCPU holds
14796 * valid CPU mode info.
14797 */
14798IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14799{
14800# ifdef IN_RING3
14801 if (LogIs2Enabled())
14802 {
14803 char szInstr[256];
14804 uint32_t cbInstr = 0;
14805 if (fSameCtx)
14806 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14807 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14808 szInstr, sizeof(szInstr), &cbInstr);
14809 else
14810 {
14811 uint32_t fFlags = 0;
14812 switch (pVCpu->iem.s.enmCpuMode)
14813 {
14814 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14815 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14816 case IEMMODE_16BIT:
14817 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14818 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14819 else
14820 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14821 break;
14822 }
14823 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14824 szInstr, sizeof(szInstr), &cbInstr);
14825 }
14826
14827 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14828 Log2(("****\n"
14829 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14830 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14831 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14832 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14833 " %s\n"
14834 ,
14835 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14836 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14837 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14838 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14839 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14840 szInstr));
14841
14842 if (LogIs3Enabled())
14843 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14844 }
14845 else
14846# endif
14847 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14848 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14849 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14850}
14851#endif
14852
14853
14854/**
14855 * Makes status code adjustments (pass up from I/O and access handlers)
14856 * as well as maintaining statistics.
14857 *
14858 * @returns Strict VBox status code to pass up.
14859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14860 * @param rcStrict The status from executing an instruction.
14861 */
14862DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14863{
14864 if (rcStrict != VINF_SUCCESS)
14865 {
14866 if (RT_SUCCESS(rcStrict))
14867 {
14868 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14869 || rcStrict == VINF_IOM_R3_IOPORT_READ
14870 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14871 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14872 || rcStrict == VINF_IOM_R3_MMIO_READ
14873 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14874 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14875 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14876 || rcStrict == VINF_CPUM_R3_MSR_READ
14877 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14878 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14879 || rcStrict == VINF_EM_RAW_TO_R3
14880 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14881 || rcStrict == VINF_EM_TRIPLE_FAULT
14882 /* raw-mode / virt handlers only: */
14883 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14884 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14885 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14886 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14887 || rcStrict == VINF_SELM_SYNC_GDT
14888 || rcStrict == VINF_CSAM_PENDING_ACTION
14889 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14890 /* nested hw.virt codes: */
14891 || rcStrict == VINF_SVM_VMEXIT
14892 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14893/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
14894 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
14895#ifdef VBOX_WITH_NESTED_HWVIRT
14896 if ( rcStrict == VINF_SVM_VMEXIT
14897 && rcPassUp == VINF_SUCCESS)
14898 rcStrict = VINF_SUCCESS;
14899 else
14900#endif
14901 if (rcPassUp == VINF_SUCCESS)
14902 pVCpu->iem.s.cRetInfStatuses++;
14903 else if ( rcPassUp < VINF_EM_FIRST
14904 || rcPassUp > VINF_EM_LAST
14905 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14906 {
14907 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14908 pVCpu->iem.s.cRetPassUpStatus++;
14909 rcStrict = rcPassUp;
14910 }
14911 else
14912 {
14913 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14914 pVCpu->iem.s.cRetInfStatuses++;
14915 }
14916 }
14917 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14918 pVCpu->iem.s.cRetAspectNotImplemented++;
14919 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14920 pVCpu->iem.s.cRetInstrNotImplemented++;
14921#ifdef IEM_VERIFICATION_MODE_FULL
14922 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14923 rcStrict = VINF_SUCCESS;
14924#endif
14925 else
14926 pVCpu->iem.s.cRetErrStatuses++;
14927 }
14928 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14929 {
14930 pVCpu->iem.s.cRetPassUpStatus++;
14931 rcStrict = pVCpu->iem.s.rcPassUp;
14932 }
14933
14934 return rcStrict;
14935}
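/*
 * Sketch of the pass-up rule implemented above: a pending informational status
 * stored in rcPassUp (set by memory and I/O callbacks) replaces the
 * instruction's own status when it lies outside the VINF_EM_FIRST..VINF_EM_LAST
 * range or ranks stricter (numerically lower) than rcStrict; otherwise the
 * instruction status wins and the pass-up is merely counted.
 */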
14936
14937
14938/**
14939 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14940 * IEMExecOneWithPrefetchedByPC.
14941 *
14942 * Similar code is found in IEMExecLots.
14943 *
14944 * @return Strict VBox status code.
14945 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14947 * @param fExecuteInhibit If set, execute the instruction following CLI,
14948 * POP SS and MOV SS,GR.
14949 */
14950DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14951{
14952#ifdef IEM_WITH_SETJMP
14953 VBOXSTRICTRC rcStrict;
14954 jmp_buf JmpBuf;
14955 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14956 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14957 if ((rcStrict = setjmp(JmpBuf)) == 0)
14958 {
14959 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14960 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14961 }
14962 else
14963 pVCpu->iem.s.cLongJumps++;
14964 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14965#else
14966 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14967 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14968#endif
14969 if (rcStrict == VINF_SUCCESS)
14970 pVCpu->iem.s.cInstructions++;
14971 if (pVCpu->iem.s.cActiveMappings > 0)
14972 {
14973 Assert(rcStrict != VINF_SUCCESS);
14974 iemMemRollback(pVCpu);
14975 }
14976//#ifdef DEBUG
14977// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14978//#endif
14979
14980 /* Execute the next instruction as well if a cli, pop ss or
14981 mov ss, Gr has just completed successfully. */
14982 if ( fExecuteInhibit
14983 && rcStrict == VINF_SUCCESS
14984 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14985 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
14986 {
14987 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14988 if (rcStrict == VINF_SUCCESS)
14989 {
14990#ifdef LOG_ENABLED
14991 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14992#endif
14993#ifdef IEM_WITH_SETJMP
14994 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14995 if ((rcStrict = setjmp(JmpBuf)) == 0)
14996 {
14997 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14998 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14999 }
15000 else
15001 pVCpu->iem.s.cLongJumps++;
15002 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15003#else
15004 IEM_OPCODE_GET_NEXT_U8(&b);
15005 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15006#endif
15007 if (rcStrict == VINF_SUCCESS)
15008 pVCpu->iem.s.cInstructions++;
15009 if (pVCpu->iem.s.cActiveMappings > 0)
15010 {
15011 Assert(rcStrict != VINF_SUCCESS);
15012 iemMemRollback(pVCpu);
15013 }
15014 }
15015 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
15016 }
15017
15018 /*
15019 * Return value fiddling, statistics and sanity assertions.
15020 */
15021 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15022
15023 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15024 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15025#if defined(IEM_VERIFICATION_MODE_FULL)
15026 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15027 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15028 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15029 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15030#endif
15031 return rcStrict;
15032}
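
/*
 * Illustrative note: the fExecuteInhibit pass above matters for guest sequences
 * like the following (hypothetical), where the instruction after MOV SS must run
 * while interrupts are inhibited so the stack switch appears atomic:
 *
 *      mov     ss, ax          ; starts the one-instruction interrupt shadow
 *      mov     esp, ebp        ; executed by the second decode/execute pass above
 */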
15033
15034
15035#ifdef IN_RC
15036/**
15037 * Re-enters raw-mode or ensures we return to ring-3.
15038 *
15039 * @returns rcStrict, maybe modified.
15040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15041 * @param pCtx The current CPU context.
15042 * @param rcStrict The status code returned by the interpreter.
15043 */
15044DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
15045{
15046 if ( !pVCpu->iem.s.fInPatchCode
15047 && ( rcStrict == VINF_SUCCESS
15048 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
15049 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
15050 {
15051 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
15052 CPUMRawEnter(pVCpu);
15053 else
15054 {
15055 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
15056 rcStrict = VINF_EM_RESCHEDULE;
15057 }
15058 }
15059 return rcStrict;
15060}
15061#endif
15062
15063
15064/**
15065 * Execute one instruction.
15066 *
15067 * @return Strict VBox status code.
15068 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15069 */
15070VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
15071{
15072#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15073 if (++pVCpu->iem.s.cVerifyDepth == 1)
15074 iemExecVerificationModeSetup(pVCpu);
15075#endif
15076#ifdef LOG_ENABLED
15077 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15078 iemLogCurInstr(pVCpu, pCtx, true);
15079#endif
15080
15081 /*
15082 * Do the decoding and emulation.
15083 */
15084 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15085 if (rcStrict == VINF_SUCCESS)
15086 rcStrict = iemExecOneInner(pVCpu, true);
15087
15088#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15089 /*
15090 * Assert some sanity.
15091 */
15092 if (pVCpu->iem.s.cVerifyDepth == 1)
15093 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15094 pVCpu->iem.s.cVerifyDepth--;
15095#endif
15096#ifdef IN_RC
15097 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15098#endif
15099 if (rcStrict != VINF_SUCCESS)
15100 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15101 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15102 return rcStrict;
15103}
15104
15105
15106VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15107{
15108 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15109 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15110
15111 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15112 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15113 if (rcStrict == VINF_SUCCESS)
15114 {
15115 rcStrict = iemExecOneInner(pVCpu, true);
15116 if (pcbWritten)
15117 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15118 }
15119
15120#ifdef IN_RC
15121 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15122#endif
15123 return rcStrict;
15124}
15125
15126
15127VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15128 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15129{
15130 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15131 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15132
15133 VBOXSTRICTRC rcStrict;
15134 if ( cbOpcodeBytes
15135 && pCtx->rip == OpcodeBytesPC)
15136 {
15137 iemInitDecoder(pVCpu, false);
15138#ifdef IEM_WITH_CODE_TLB
15139 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15140 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15141 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15142 pVCpu->iem.s.offCurInstrStart = 0;
15143 pVCpu->iem.s.offInstrNextByte = 0;
15144#else
15145 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15146 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15147#endif
15148 rcStrict = VINF_SUCCESS;
15149 }
15150 else
15151 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15152 if (rcStrict == VINF_SUCCESS)
15153 {
15154 rcStrict = iemExecOneInner(pVCpu, true);
15155 }
15156
15157#ifdef IN_RC
15158 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15159#endif
15160 return rcStrict;
15161}
15162
15163
15164VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15165{
15166 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15167 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15168
15169 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15170 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15171 if (rcStrict == VINF_SUCCESS)
15172 {
15173 rcStrict = iemExecOneInner(pVCpu, false);
15174 if (pcbWritten)
15175 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15176 }
15177
15178#ifdef IN_RC
15179 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15180#endif
15181 return rcStrict;
15182}
15183
15184
15185VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15186 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15187{
15188 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15189 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15190
15191 VBOXSTRICTRC rcStrict;
15192 if ( cbOpcodeBytes
15193 && pCtx->rip == OpcodeBytesPC)
15194 {
15195 iemInitDecoder(pVCpu, true);
15196#ifdef IEM_WITH_CODE_TLB
15197 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15198 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15199 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15200 pVCpu->iem.s.offCurInstrStart = 0;
15201 pVCpu->iem.s.offInstrNextByte = 0;
15202#else
15203 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15204 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15205#endif
15206 rcStrict = VINF_SUCCESS;
15207 }
15208 else
15209 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15210 if (rcStrict == VINF_SUCCESS)
15211 rcStrict = iemExecOneInner(pVCpu, false);
15212
15213#ifdef IN_RC
15214 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15215#endif
15216 return rcStrict;
15217}
15218
15219
15220/**
15221 * For debugging DISGetParamSize, may come in handy.
15222 *
15223 * @returns Strict VBox status code.
15224 * @param pVCpu The cross context virtual CPU structure of the
15225 * calling EMT.
15226 * @param pCtxCore The context core structure.
15227 * @param OpcodeBytesPC The PC of the opcode bytes.
15228 * @param pvOpcodeBytes Prefetched opcode bytes.
15229 * @param cbOpcodeBytes Number of prefetched bytes.
15230 * @param pcbWritten Where to return the number of bytes written.
15231 * Optional.
15232 */
15233VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15234 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
15235 uint32_t *pcbWritten)
15236{
15237 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15238 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15239
15240 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15241 VBOXSTRICTRC rcStrict;
15242 if ( cbOpcodeBytes
15243 && pCtx->rip == OpcodeBytesPC)
15244 {
15245 iemInitDecoder(pVCpu, true);
15246#ifdef IEM_WITH_CODE_TLB
15247 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15248 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15249 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15250 pVCpu->iem.s.offCurInstrStart = 0;
15251 pVCpu->iem.s.offInstrNextByte = 0;
15252#else
15253 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15254 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15255#endif
15256 rcStrict = VINF_SUCCESS;
15257 }
15258 else
15259 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15260 if (rcStrict == VINF_SUCCESS)
15261 {
15262 rcStrict = iemExecOneInner(pVCpu, false);
15263 if (pcbWritten)
15264 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15265 }
15266
15267#ifdef IN_RC
15268 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15269#endif
15270 return rcStrict;
15271}
15272
15273
15274VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
15275{
15276 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
15277
15278#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15279 /*
15280 * See if there is an interrupt pending in TRPM, inject it if we can.
15281 */
15282 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15283# ifdef IEM_VERIFICATION_MODE_FULL
15284 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15285# endif
15286
15287 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
15288# if defined(VBOX_WITH_NESTED_HWVIRT)
15289 bool fIntrEnabled = pCtx->hwvirt.fGif;
15290 if (fIntrEnabled)
15291 {
15292 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15293 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15294 else
15295 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15296 }
15297# else
15298 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15299# endif
15300 if ( fIntrEnabled
15301 && TRPMHasTrap(pVCpu)
15302 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15303 {
15304 uint8_t u8TrapNo;
15305 TRPMEVENT enmType;
15306 RTGCUINT uErrCode;
15307 RTGCPTR uCr2;
15308 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15309 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15310 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15311 TRPMResetTrap(pVCpu);
15312 }
15313
15314 /*
15315 * Log the state.
15316 */
15317# ifdef LOG_ENABLED
15318 iemLogCurInstr(pVCpu, pCtx, true);
15319# endif
15320
15321 /*
15322 * Do the decoding and emulation.
15323 */
15324 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15325 if (rcStrict == VINF_SUCCESS)
15326 rcStrict = iemExecOneInner(pVCpu, true);
15327
15328 /*
15329 * Assert some sanity.
15330 */
15331 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15332
15333 /*
15334 * Log and return.
15335 */
15336 if (rcStrict != VINF_SUCCESS)
15337 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15338 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15339 if (pcInstructions)
15340 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15341 return rcStrict;
15342
15343#else /* Not verification mode */
15344
15345 /*
15346 * See if there is an interrupt pending in TRPM, inject it if we can.
15347 */
15348 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15349# ifdef IEM_VERIFICATION_MODE_FULL
15350 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15351# endif
15352
15353 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
15354# if defined(VBOX_WITH_NESTED_HWVIRT)
15355 bool fIntrEnabled = pCtx->hwvirt.fGif;
15356 if (fIntrEnabled)
15357 {
15358 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15359 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15360 else
15361 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15362 }
15363# else
15364 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15365# endif
15366 if ( fIntrEnabled
15367 && TRPMHasTrap(pVCpu)
15368 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15369 {
15370 uint8_t u8TrapNo;
15371 TRPMEVENT enmType;
15372 RTGCUINT uErrCode;
15373 RTGCPTR uCr2;
15374 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15375 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15376 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15377 TRPMResetTrap(pVCpu);
15378 }
15379
15380 /*
15381 * Initial decoder init w/ prefetch, then setup setjmp.
15382 */
15383 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15384 if (rcStrict == VINF_SUCCESS)
15385 {
15386# ifdef IEM_WITH_SETJMP
15387 jmp_buf JmpBuf;
15388 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
15389 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15390 pVCpu->iem.s.cActiveMappings = 0;
15391 if ((rcStrict = setjmp(JmpBuf)) == 0)
15392# endif
15393 {
15394 /*
15395 * The run loop. We limit ourselves to 4096 instructions right now.
15396 */
15397 PVM pVM = pVCpu->CTX_SUFF(pVM);
15398 uint32_t cInstr = 4096;
15399 for (;;)
15400 {
15401 /*
15402 * Log the state.
15403 */
15404# ifdef LOG_ENABLED
15405 iemLogCurInstr(pVCpu, pCtx, true);
15406# endif
15407
15408 /*
15409 * Do the decoding and emulation.
15410 */
15411 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15412 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15413 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15414 {
15415 Assert(pVCpu->iem.s.cActiveMappings == 0);
15416 pVCpu->iem.s.cInstructions++;
15417 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
15418 {
15419 uint32_t fCpu = pVCpu->fLocalForcedActions
15420 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
15421 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
15422 | VMCPU_FF_TLB_FLUSH
15423# ifdef VBOX_WITH_RAW_MODE
15424 | VMCPU_FF_TRPM_SYNC_IDT
15425 | VMCPU_FF_SELM_SYNC_TSS
15426 | VMCPU_FF_SELM_SYNC_GDT
15427 | VMCPU_FF_SELM_SYNC_LDT
15428# endif
15429 | VMCPU_FF_INHIBIT_INTERRUPTS
15430 | VMCPU_FF_BLOCK_NMIS
15431 | VMCPU_FF_UNHALT ));
15432
15433 if (RT_LIKELY( ( !fCpu
15434 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
15435 && !pCtx->rflags.Bits.u1IF) )
15436 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
15437 {
15438 if (cInstr-- > 0)
15439 {
15440 Assert(pVCpu->iem.s.cActiveMappings == 0);
15441 iemReInitDecoder(pVCpu);
15442 continue;
15443 }
15444 }
15445 }
15446 Assert(pVCpu->iem.s.cActiveMappings == 0);
15447 }
15448 else if (pVCpu->iem.s.cActiveMappings > 0)
15449 iemMemRollback(pVCpu);
15450 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15451 break;
15452 }
15453 }
15454# ifdef IEM_WITH_SETJMP
15455 else
15456 {
15457 if (pVCpu->iem.s.cActiveMappings > 0)
15458 iemMemRollback(pVCpu);
15459 pVCpu->iem.s.cLongJumps++;
15460 }
15461 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15462# endif
15463
15464 /*
15465 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15466 */
15467 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15468 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15469# if defined(IEM_VERIFICATION_MODE_FULL)
15470 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15471 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15472 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15473 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15474# endif
15475 }
15476# ifdef VBOX_WITH_NESTED_HWVIRT
15477 else
15478 {
15479 /*
15480 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
15481 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
15482 */
15483 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15484 }
15485# endif
15486
15487 /*
15488 * Maybe re-enter raw-mode and log.
15489 */
15490# ifdef IN_RC
15491 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15492# endif
15493 if (rcStrict != VINF_SUCCESS)
15494 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15495 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15496 if (pcInstructions)
15497 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15498 return rcStrict;
15499#endif /* Not verification mode */
15500}
15501
15502
15503
15504/**
15505 * Injects a trap, fault, abort, software interrupt or external interrupt.
15506 *
15507 * The parameter list matches TRPMQueryTrapAll pretty closely.
15508 *
15509 * @returns Strict VBox status code.
15510 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15511 * @param u8TrapNo The trap number.
15512 * @param enmType What type is it (trap/fault/abort), software
15513 * interrupt or hardware interrupt.
15514 * @param uErrCode The error code if applicable.
15515 * @param uCr2 The CR2 value if applicable.
15516 * @param cbInstr The instruction length (only relevant for
15517 * software interrupts).
15518 */
15519VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
15520 uint8_t cbInstr)
15521{
15522 iemInitDecoder(pVCpu, false);
15523#ifdef DBGFTRACE_ENABLED
15524 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
15525 u8TrapNo, enmType, uErrCode, uCr2);
15526#endif
15527
15528 uint32_t fFlags;
15529 switch (enmType)
15530 {
15531 case TRPM_HARDWARE_INT:
15532 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
15533 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
15534 uErrCode = uCr2 = 0;
15535 break;
15536
15537 case TRPM_SOFTWARE_INT:
15538 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
15539 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
15540 uErrCode = uCr2 = 0;
15541 break;
15542
15543 case TRPM_TRAP:
15544 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
15545 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
15546 if (u8TrapNo == X86_XCPT_PF)
15547 fFlags |= IEM_XCPT_FLAGS_CR2;
15548 switch (u8TrapNo)
15549 {
15550 case X86_XCPT_DF:
15551 case X86_XCPT_TS:
15552 case X86_XCPT_NP:
15553 case X86_XCPT_SS:
15554 case X86_XCPT_PF:
15555 case X86_XCPT_AC:
15556 fFlags |= IEM_XCPT_FLAGS_ERR;
15557 break;
15558
15559 case X86_XCPT_NMI:
15560 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
15561 break;
15562 }
15563 break;
15564
15565 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15566 }
15567
15568 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15569}
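
/*
 * Illustrative sketch (kept out of the build): injecting a hypothetical page fault
 * through the interface above.  The error code bits and fault address are made up
 * for the example.
 */
#if 0
static VBOXSTRICTRC iemExampleInjectPageFault(PVMCPU pVCpu)
{
    uint16_t const uErrCode   = X86_TRAP_PF_P | X86_TRAP_PF_RW; /* present + write access */
    RTGCPTR  const GCPtrFault = 0x1000;                         /* hypothetical faulting address */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0 /* cbInstr */);
}
#endif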
15570
15571
15572/**
15573 * Injects the active TRPM event.
15574 *
15575 * @returns Strict VBox status code.
15576 * @param pVCpu The cross context virtual CPU structure.
15577 */
15578VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
15579{
15580#ifndef IEM_IMPLEMENTS_TASKSWITCH
15581 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15582#else
15583 uint8_t u8TrapNo;
15584 TRPMEVENT enmType;
15585 RTGCUINT uErrCode;
15586 RTGCUINTPTR uCr2;
15587 uint8_t cbInstr;
15588 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
15589 if (RT_FAILURE(rc))
15590 return rc;
15591
15592 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15593
15594 /** @todo Are there any other codes that imply the event was successfully
15595 * delivered to the guest? See @bugref{6607}. */
15596 if ( rcStrict == VINF_SUCCESS
15597 || rcStrict == VINF_IEM_RAISED_XCPT)
15598 {
15599 TRPMResetTrap(pVCpu);
15600 }
15601 return rcStrict;
15602#endif
15603}
15604
15605
15606VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15607{
15608 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15609 return VERR_NOT_IMPLEMENTED;
15610}
15611
15612
15613VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15614{
15615 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15616 return VERR_NOT_IMPLEMENTED;
15617}
15618
15619
15620#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15621/**
15622 * Executes an IRET instruction with default operand size.
15623 *
15624 * This is for PATM.
15625 *
15626 * @returns VBox status code.
15627 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15628 * @param pCtxCore The register frame.
15629 */
15630VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
15631{
15632 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15633
15634 iemCtxCoreToCtx(pCtx, pCtxCore);
15635 iemInitDecoder(pVCpu);
15636 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15637 if (rcStrict == VINF_SUCCESS)
15638 iemCtxToCtxCore(pCtxCore, pCtx);
15639 else
15640 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15641 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15642 return rcStrict;
15643}
15644#endif
15645
15646
15647/**
15648 * Macro used by the IEMExec* methods to check the given instruction length.
15649 *
15650 * Will return on failure!
15651 *
15652 * @param a_cbInstr The given instruction length.
15653 * @param a_cbMin The minimum length.
15654 */
15655#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15656 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15657 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
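
/*
 * Worked example of the single-compare range check above, assuming a_cbMin is 2:
 * a valid length of 3 gives 3 - 2 = 1, which passes the <= 15 - 2 = 13 test; a too
 * short length of 1 wraps to UINT_MAX in the unsigned subtraction and a too long
 * length of 16 gives 14, so both fail and return VERR_IEM_INVALID_INSTR_LENGTH.
 */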
15658
15659
15660/**
15661 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15662 *
15663 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15664 *
15665 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
15666 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15667 * @param rcStrict The status code to fiddle.
15668 */
15669DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15670{
15671 iemUninitExec(pVCpu);
15672#ifdef IN_RC
15673 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15674 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15675#else
15676 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15677#endif
15678}
15679
15680
15681/**
15682 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15683 *
15684 * This API ASSUMES that the caller has already verified that the guest code is
15685 * allowed to access the I/O port. (The I/O port is in the DX register in the
15686 * guest state.)
15687 *
15688 * @returns Strict VBox status code.
15689 * @param pVCpu The cross context virtual CPU structure.
15690 * @param cbValue The size of the I/O port access (1, 2, or 4).
15691 * @param enmAddrMode The addressing mode.
15692 * @param fRepPrefix Indicates whether a repeat prefix is used
15693 * (doesn't matter which for this instruction).
15694 * @param cbInstr The instruction length in bytes.
15695 * @param iEffSeg The index of the effective segment register.
15696 * @param fIoChecked Whether the access to the I/O port has been
15697 * checked or not. It's typically checked in the
15698 * HM scenario.
15699 */
15700VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15701 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15702{
15703 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15704 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15705
15706 /*
15707 * State init.
15708 */
15709 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15710
15711 /*
15712 * Switch orgy for getting to the right handler.
15713 */
15714 VBOXSTRICTRC rcStrict;
15715 if (fRepPrefix)
15716 {
15717 switch (enmAddrMode)
15718 {
15719 case IEMMODE_16BIT:
15720 switch (cbValue)
15721 {
15722 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15723 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15724 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15725 default:
15726 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15727 }
15728 break;
15729
15730 case IEMMODE_32BIT:
15731 switch (cbValue)
15732 {
15733 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15734 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15735 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15736 default:
15737 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15738 }
15739 break;
15740
15741 case IEMMODE_64BIT:
15742 switch (cbValue)
15743 {
15744 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15745 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15746 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15747 default:
15748 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15749 }
15750 break;
15751
15752 default:
15753 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15754 }
15755 }
15756 else
15757 {
15758 switch (enmAddrMode)
15759 {
15760 case IEMMODE_16BIT:
15761 switch (cbValue)
15762 {
15763 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15764 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15765 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15766 default:
15767 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15768 }
15769 break;
15770
15771 case IEMMODE_32BIT:
15772 switch (cbValue)
15773 {
15774 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15775 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15776 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15777 default:
15778 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15779 }
15780 break;
15781
15782 case IEMMODE_64BIT:
15783 switch (cbValue)
15784 {
15785 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15786 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15787 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15788 default:
15789 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15790 }
15791 break;
15792
15793 default:
15794 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15795 }
15796 }
15797
15798 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15799}
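
/*
 * Illustrative sketch (kept out of the build): how a hypothetical HM exit handler
 * might forward an intercepted REP OUTSB to the interface above.  The operand size,
 * addressing mode and segment are made up for the example.
 */
#if 0
static VBOXSTRICTRC hmExampleForwardRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu,
                                1,                /* cbValue: byte-sized OUTSB */
                                IEMMODE_32BIT,    /* enmAddrMode */
                                true,             /* fRepPrefix */
                                cbInstr,
                                X86_SREG_DS,      /* iEffSeg */
                                true);            /* fIoChecked: port access already checked */
}
#endif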
15800
15801
15802/**
15803 * Interface for HM and EM for executing string I/O IN (read) instructions.
15804 *
15805 * This API ASSUMES that the caller has already verified that the guest code is
15806 * allowed to access the I/O port. (The I/O port is in the DX register in the
15807 * guest state.)
15808 *
15809 * @returns Strict VBox status code.
15810 * @param pVCpu The cross context virtual CPU structure.
15811 * @param cbValue The size of the I/O port access (1, 2, or 4).
15812 * @param enmAddrMode The addressing mode.
15813 * @param fRepPrefix Indicates whether a repeat prefix is used
15814 * (doesn't matter which for this instruction).
15815 * @param cbInstr The instruction length in bytes.
15816 * @param fIoChecked Whether the access to the I/O port has been
15817 * checked or not. It's typically checked in the
15818 * HM scenario.
15819 */
15820VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15821 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15822{
15823 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15824
15825 /*
15826 * State init.
15827 */
15828 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15829
15830 /*
15831 * Switch orgy for getting to the right handler.
15832 */
15833 VBOXSTRICTRC rcStrict;
15834 if (fRepPrefix)
15835 {
15836 switch (enmAddrMode)
15837 {
15838 case IEMMODE_16BIT:
15839 switch (cbValue)
15840 {
15841 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15842 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15843 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15844 default:
15845 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15846 }
15847 break;
15848
15849 case IEMMODE_32BIT:
15850 switch (cbValue)
15851 {
15852 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15853 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15854 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15855 default:
15856 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15857 }
15858 break;
15859
15860 case IEMMODE_64BIT:
15861 switch (cbValue)
15862 {
15863 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15864 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15865 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15866 default:
15867 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15868 }
15869 break;
15870
15871 default:
15872 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15873 }
15874 }
15875 else
15876 {
15877 switch (enmAddrMode)
15878 {
15879 case IEMMODE_16BIT:
15880 switch (cbValue)
15881 {
15882 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15883 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15884 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15885 default:
15886 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15887 }
15888 break;
15889
15890 case IEMMODE_32BIT:
15891 switch (cbValue)
15892 {
15893 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15894 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15895 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15896 default:
15897 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15898 }
15899 break;
15900
15901 case IEMMODE_64BIT:
15902 switch (cbValue)
15903 {
15904 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15905 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15906 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15907 default:
15908 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15909 }
15910 break;
15911
15912 default:
15913 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15914 }
15915 }
15916
15917 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15918}
15919
15920
15921/**
15922 * Interface for raw-mode to execute an OUT (port write) instruction.
15923 *
15924 * @returns Strict VBox status code.
15925 * @param pVCpu The cross context virtual CPU structure.
15926 * @param cbInstr The instruction length in bytes.
15927 * @param u16Port The port to write to.
15928 * @param cbReg The register size.
15929 *
15930 * @remarks In ring-0 not all of the state needs to be synced in.
15931 */
15932VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15933{
15934 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15935 Assert(cbReg <= 4 && cbReg != 3);
15936
15937 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15938 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15939 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15940}
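
/*
 * Illustrative sketch (kept out of the build): a hypothetical caller emulating a
 * one byte "out dx, al" (the port having been read from the guest DX register).
 */
#if 0
static VBOXSTRICTRC exampleEmulateOutDxAl(PVMCPU pVCpu, uint16_t u16Port)
{
    return IEMExecDecodedOut(pVCpu, 1 /* cbInstr */, u16Port, 1 /* cbReg: byte access */);
}
#endif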
15941
15942
15943/**
15944 * Interface for raw-mode to execute an IN (port read) instruction.
15945 *
15946 * @returns Strict VBox status code.
15947 * @param pVCpu The cross context virtual CPU structure.
15948 * @param cbInstr The instruction length in bytes.
15949 * @param u16Port The port to read.
15950 * @param cbReg The register size.
15951 */
15952VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15953{
15954 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15955 Assert(cbReg <= 4 && cbReg != 3);
15956
15957 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15958 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15959 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15960}
15961
15962
15963/**
15964 * Interface for HM and EM to write to a CRx register.
15965 *
15966 * @returns Strict VBox status code.
15967 * @param pVCpu The cross context virtual CPU structure.
15968 * @param cbInstr The instruction length in bytes.
15969 * @param iCrReg The control register number (destination).
15970 * @param iGReg The general purpose register number (source).
15971 *
15972 * @remarks In ring-0 not all of the state needs to be synced in.
15973 */
15974VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15975{
15976 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15977 Assert(iCrReg < 16);
15978 Assert(iGReg < 16);
15979
15980 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15981 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15982 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15983}
15984
15985
15986/**
15987 * Interface for HM and EM to read from a CRx register.
15988 *
15989 * @returns Strict VBox status code.
15990 * @param pVCpu The cross context virtual CPU structure.
15991 * @param cbInstr The instruction length in bytes.
15992 * @param iGReg The general purpose register number (destination).
15993 * @param iCrReg The control register number (source).
15994 *
15995 * @remarks In ring-0 not all of the state needs to be synced in.
15996 */
15997VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15998{
15999 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
16000 Assert(iCrReg < 16);
16001 Assert(iGReg < 16);
16002
16003 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16004 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
16005 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16006}
16007
16008
16009/**
16010 * Interface for HM and EM to clear the CR0[TS] bit.
16011 *
16012 * @returns Strict VBox status code.
16013 * @param pVCpu The cross context virtual CPU structure.
16014 * @param cbInstr The instruction length in bytes.
16015 *
16016 * @remarks In ring-0 not all of the state needs to be synced in.
16017 */
16018VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
16019{
16020 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
16021
16022 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16023 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
16024 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16025}
16026
16027
16028/**
16029 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
16030 *
16031 * @returns Strict VBox status code.
16032 * @param pVCpu The cross context virtual CPU structure.
16033 * @param cbInstr The instruction length in bytes.
16034 * @param uValue The value to load into CR0.
16035 *
16036 * @remarks In ring-0 not all of the state needs to be synced in.
16037 */
16038VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
16039{
16040 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16041
16042 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16043 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
16044 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16045}
16046
16047
16048/**
16049 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
16050 *
16051 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
16052 *
16053 * @returns Strict VBox status code.
16054 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16055 * @param cbInstr The instruction length in bytes.
16056 * @remarks In ring-0 not all of the state needs to be synced in.
16057 * @thread EMT(pVCpu)
16058 */
16059VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
16060{
16061 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16062
16063 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16064 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
16065 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16066}
16067
16068
16069/**
16070 * Interface for HM and EM to emulate the INVLPG instruction.
16071 *
16072 * @param pVCpu The cross context virtual CPU structure.
16073 * @param cbInstr The instruction length in bytes.
16074 * @param GCPtrPage The effective address of the page to invalidate.
16075 *
16076 * @remarks In ring-0 not all of the state needs to be synced in.
16077 */
16078VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
16079{
16080 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16081
16082 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16083 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
16084 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16085}
16086
16087
16088/**
16089 * Interface for HM and EM to emulate the INVPCID instruction.
16090 *
16091 * @param pVCpu The cross context virtual CPU structure.
16092 * @param cbInstr The instruction length in bytes.
16093 * @param uType The invalidation type.
16094 * @param GCPtrInvpcidDesc The effective address of the INVPCID descriptor.
16095 *
16096 * @remarks In ring-0 not all of the state needs to be synced in.
16097 */
16098VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t uType, RTGCPTR GCPtrInvpcidDesc)
16099{
16100 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
16101
16102 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16103 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_invpcid, uType, GCPtrInvpcidDesc);
16104 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16105}
16106
16107
16108/**
16109 * Checks if IEM is in the process of delivering an event (interrupt or
16110 * exception).
16111 *
16112 * @returns true if we're in the process of raising an interrupt or exception,
16113 * false otherwise.
16114 * @param pVCpu The cross context virtual CPU structure.
16115 * @param puVector Where to store the vector associated with the
16116 * currently delivered event, optional.
16117 * @param pfFlags Where to store the event delivery flags (see
16118 * IEM_XCPT_FLAGS_XXX), optional.
16119 * @param puErr Where to store the error code associated with the
16120 * event, optional.
16121 * @param puCr2 Where to store the CR2 associated with the event,
16122 * optional.
16123 * @remarks The caller should check the flags to determine if the error code and
16124 * CR2 are valid for the event.
16125 */
16126VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
16127{
16128 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
16129 if (fRaisingXcpt)
16130 {
16131 if (puVector)
16132 *puVector = pVCpu->iem.s.uCurXcpt;
16133 if (pfFlags)
16134 *pfFlags = pVCpu->iem.s.fCurXcpt;
16135 if (puErr)
16136 *puErr = pVCpu->iem.s.uCurXcptErr;
16137 if (puCr2)
16138 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
16139 }
16140 return fRaisingXcpt;
16141}
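
/*
 * Illustrative sketch (kept out of the build): a hypothetical caller using the
 * query above to check whether a page fault is being delivered right now.
 */
#if 0
static bool exampleIsDeliveringPageFault(PVMCPU pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        return uVector == X86_XCPT_PF
            && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
    return false;
}
#endif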
16142
16143#ifdef VBOX_WITH_NESTED_HWVIRT
16144/**
16145 * Interface for HM and EM to emulate the CLGI instruction.
16146 *
16147 * @returns Strict VBox status code.
16148 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16149 * @param cbInstr The instruction length in bytes.
16150 * @thread EMT(pVCpu)
16151 */
16152VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
16153{
16154 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16155
16156 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16157 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
16158 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16159}
16160
16161
16162/**
16163 * Interface for HM and EM to emulate the STGI instruction.
16164 *
16165 * @returns Strict VBox status code.
16166 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16167 * @param cbInstr The instruction length in bytes.
16168 * @thread EMT(pVCpu)
16169 */
16170VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
16171{
16172 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16173
16174 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16175 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
16176 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16177}
16178
16179
16180/**
16181 * Interface for HM and EM to emulate the VMLOAD instruction.
16182 *
16183 * @returns Strict VBox status code.
16184 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16185 * @param cbInstr The instruction length in bytes.
16186 * @thread EMT(pVCpu)
16187 */
16188VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
16189{
16190 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16191
16192 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16193 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
16194 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16195}
16196
16197
16198/**
16199 * Interface for HM and EM to emulate the VMSAVE instruction.
16200 *
16201 * @returns Strict VBox status code.
16202 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16203 * @param cbInstr The instruction length in bytes.
16204 * @thread EMT(pVCpu)
16205 */
16206VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
16207{
16208 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16209
16210 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16211 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
16212 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16213}
16214
16215
16216/**
16217 * Interface for HM and EM to emulate the INVLPGA instruction.
16218 *
16219 * @returns Strict VBox status code.
16220 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16221 * @param cbInstr The instruction length in bytes.
16222 * @thread EMT(pVCpu)
16223 */
16224VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
16225{
16226 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16227
16228 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16229 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
16230 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16231}
16232
16233
16234/**
16235 * Interface for HM and EM to emulate the VMRUN instruction.
16236 *
16237 * @returns Strict VBox status code.
16238 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16239 * @param cbInstr The instruction length in bytes.
16240 * @thread EMT(pVCpu)
16241 */
16242VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
16243{
16244 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16245
16246 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16247 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
16248 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16249}
16250
16251
16252/**
16253 * Interface for HM and EM to emulate \#VMEXIT.
16254 *
16255 * @returns Strict VBox status code.
16256 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16257 * @param uExitCode The exit code.
16258 * @param uExitInfo1 The exit info. 1 field.
16259 * @param uExitInfo2 The exit info. 2 field.
16260 * @thread EMT(pVCpu)
16261 */
16262VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
16263{
16264 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, IEM_GET_CTX(pVCpu), uExitCode, uExitInfo1, uExitInfo2);
16265 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16266}
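
/*
 * Illustrative sketch (kept out of the build): a hypothetical caller forcing a
 * #VMEXIT for a physical interrupt intercept.  SVM_EXIT_INTR is assumed to be the
 * SVM exit code for that intercept; it carries no exit info.
 */
#if 0
static VBOXSTRICTRC exampleForcePhysIntrVmexit(PVMCPU pVCpu)
{
    return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
}
#endif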
16267#endif /* VBOX_WITH_NESTED_HWVIRT */
16268
16269#ifdef IN_RING3
16270
16271/**
16272 * Handles the unlikely and probably fatal merge cases.
16273 *
16274 * @returns Merged status code.
16275 * @param rcStrict Current EM status code.
16276 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16277 * with @a rcStrict.
16278 * @param iMemMap The memory mapping index. For error reporting only.
16279 * @param pVCpu The cross context virtual CPU structure of the calling
16280 * thread, for error reporting only.
16281 */
16282DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16283 unsigned iMemMap, PVMCPU pVCpu)
16284{
16285 if (RT_FAILURE_NP(rcStrict))
16286 return rcStrict;
16287
16288 if (RT_FAILURE_NP(rcStrictCommit))
16289 return rcStrictCommit;
16290
16291 if (rcStrict == rcStrictCommit)
16292 return rcStrictCommit;
16293
16294 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16295 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16296 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16297 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16298 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16299 return VERR_IOM_FF_STATUS_IPE;
16300}
16301
16302
16303/**
16304 * Helper for IOMR3ProcessForceFlag.
16305 *
16306 * @returns Merged status code.
16307 * @param rcStrict Current EM status code.
16308 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16309 * with @a rcStrict.
16310 * @param iMemMap The memory mapping index. For error reporting only.
16311 * @param pVCpu The cross context virtual CPU structure of the calling
16312 * thread, for error reporting only.
16313 */
16314DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16315{
16316 /* Simple. */
16317 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16318 return rcStrictCommit;
16319
16320 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16321 return rcStrict;
16322
16323 /* EM scheduling status codes. */
16324 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16325 && rcStrict <= VINF_EM_LAST))
16326 {
16327 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16328 && rcStrictCommit <= VINF_EM_LAST))
16329 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16330 }
16331
16332 /* Unlikely */
16333 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16334}
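
/*
 * Worked examples of the merge rules above: when rcStrict is VINF_SUCCESS or
 * VINF_EM_RAW_TO_R3 the commit status simply wins; when the commit finished with
 * VINF_SUCCESS the existing rcStrict is kept; and when both are EM scheduling
 * codes the numerically smaller of the two is returned.
 */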
16335
16336
16337/**
16338 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16339 *
16340 * @returns Merge between @a rcStrict and what the commit operation returned.
16341 * @param pVM The cross context VM structure.
16342 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16343 * @param rcStrict The status code returned by ring-0 or raw-mode.
16344 */
16345VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16346{
16347 /*
16348 * Reset the pending commit.
16349 */
16350 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16351 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16352 ("%#x %#x %#x\n",
16353 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16354 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16355
16356 /*
16357 * Commit the pending bounce buffers (usually just one).
16358 */
16359 unsigned cBufs = 0;
16360 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16361 while (iMemMap-- > 0)
16362 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16363 {
16364 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16365 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16366 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16367
16368 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16369 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16370 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16371
16372 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16373 {
16374 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16375 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16376 pbBuf,
16377 cbFirst,
16378 PGMACCESSORIGIN_IEM);
16379 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16380 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16381 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16382 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16383 }
16384
16385 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16386 {
16387 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16388 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16389 pbBuf + cbFirst,
16390 cbSecond,
16391 PGMACCESSORIGIN_IEM);
16392 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16393 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16394 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16395 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16396 }
16397 cBufs++;
16398 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16399 }
16400
16401 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16402 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16403 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16404 pVCpu->iem.s.cActiveMappings = 0;
16405 return rcStrict;
16406}
16407
16408#endif /* IN_RING3 */
16409