VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 72441

Last change on this file since 72441 was 72441, checked in by vboxsync, 7 years ago

VMM/IEM: Nested hw.virt: Fixes when nested-paging isn't enabled in the outer guest.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 642.5 KB
1/* $Id: IEMAll.cpp 72441 2018-06-05 05:45:38Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
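/*
 * Illustration only (assumed usage; the format strings and variables below are made
 * up): with LOG_GROUP set to LOG_GROUP_IEM (see below), the level conventions listed
 * above map onto the regular VBox logging macros roughly like this:
 *
 *      Log(("iemRaiseXcptOrInt: raising #GP(0)\n"));                      // level 1: major events
 *      LogFlow(("IEMExecOne: cs:rip=%04x:%RX64\n", uCsSel, uRip));        // flow:    enter/exit info
 *      Log4(("decode - %04x:%RX64 %s\n", uCsSel, uRip, pszMnemonic));     // level 4: mnemonics w/ EIP
 *      Log8(("IEM WR %RGv LB %u\n", GCPtrDst, cbWritten));                // level 8: memory writes
 */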
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
105# include <VBox/vmm/em.h>
106# include <VBox/vmm/hm_svm.h>
107#endif
108#include <VBox/vmm/tm.h>
109#include <VBox/vmm/dbgf.h>
110#include <VBox/vmm/dbgftrace.h>
111#ifdef VBOX_WITH_RAW_MODE_NOT_R0
112# include <VBox/vmm/patm.h>
113# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
114# include <VBox/vmm/csam.h>
115# endif
116#endif
117#include "IEMInternal.h"
118#ifdef IEM_VERIFICATION_MODE_FULL
119# include <VBox/vmm/rem.h>
120# include <VBox/vmm/mm.h>
121#endif
122#include <VBox/vmm/vm.h>
123#include <VBox/log.h>
124#include <VBox/err.h>
125#include <VBox/param.h>
126#include <VBox/dis.h>
127#include <VBox/disopcode.h>
128#include <iprt/assert.h>
129#include <iprt/string.h>
130#include <iprt/x86.h>
131
132
133/*********************************************************************************************************************************
134* Structures and Typedefs *
135*********************************************************************************************************************************/
136/** @typedef PFNIEMOP
137 * Pointer to an opcode decoder function.
138 */
139
140/** @def FNIEMOP_DEF
141 * Define an opcode decoder function.
142 *
143 * We're using macros for this so that adding and removing parameters as well as
144 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
145 *
146 * @param a_Name The function name.
147 */
148
149/** @typedef PFNIEMOPRM
150 * Pointer to an opcode decoder function with RM byte.
151 */
152
153/** @def FNIEMOPRM_DEF
154 * Define an opcode decoder function with RM byte.
155 *
156 * We're using macros for this so that adding and removing parameters as well as
157 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
158 *
159 * @param a_Name The function name.
160 */
161
162#if defined(__GNUC__) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
164typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
171
172#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
174typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
181
182#elif defined(__GNUC__)
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
191
192#else
193typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
194typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
195# define FNIEMOP_DEF(a_Name) \
196 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
197# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
198 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
199# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
200 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
201
202#endif
203#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
204
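/*
 * Illustration only (assumed usage; the decoder name iemOp_example_ud and the bOpcode
 * variable are made up): a decoder function is declared with FNIEMOP_DEF so the calling
 * convention and throw specification stay in one place, and it is later invoked through
 * the matching FNIEMOP_CALL macro:
 *
 *      FNIEMOP_DEF(iemOp_example_ud)
 *      {
 *          // Hypothetical decoder body: always raise #UD.
 *          return iemRaiseUndefinedOpcode(pVCpu);
 *      }
 *
 *      // In a dispatcher (pVCpu in scope), typically indexed via an opcode map:
 *      VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]);
 */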
205
206/**
207 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
208 */
209typedef union IEMSELDESC
210{
211 /** The legacy view. */
212 X86DESC Legacy;
213 /** The long mode view. */
214 X86DESC64 Long;
215} IEMSELDESC;
216/** Pointer to a selector descriptor table entry. */
217typedef IEMSELDESC *PIEMSELDESC;
218
219/**
220 * CPU exception classes.
221 */
222typedef enum IEMXCPTCLASS
223{
224 IEMXCPTCLASS_BENIGN,
225 IEMXCPTCLASS_CONTRIBUTORY,
226 IEMXCPTCLASS_PAGE_FAULT,
227 IEMXCPTCLASS_DOUBLE_FAULT
228} IEMXCPTCLASS;
229
230
231/*********************************************************************************************************************************
232* Defined Constants And Macros *
233*********************************************************************************************************************************/
234/** @def IEM_WITH_SETJMP
235 * Enables alternative status code handling using setjmps.
236 *
237 * This adds a bit of expense via the setjmp() call since it saves all the
238 * non-volatile registers. However, it eliminates return code checks and allows
239 * for more optimal return value passing (return regs instead of stack buffer).
240 */
241#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
242# define IEM_WITH_SETJMP
243#endif
244
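/*
 * Illustration only (assumed usage; the helper names follow the iemOpcodeGetNextU8 /
 * iemOpcodeGetNextU8Jmp pattern used further down in IEM): the practical difference
 * the setjmp mode makes at a fetch-helper call site:
 *
 *      // Without IEM_WITH_SETJMP: every call returns a status that must be checked.
 *      uint8_t      b;
 *      VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8(pVCpu, &b);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 *      // With IEM_WITH_SETJMP: failures longjmp out, so the value comes back directly.
 *      uint8_t b = iemOpcodeGetNextU8Jmp(pVCpu);
 */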
245/** Temporary hack to disable the double execution. Will be removed in favor
246 * of a dedicated execution mode in EM. */
247//#define IEM_VERIFICATION_MODE_NO_REM
248
249/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
250 * due to GCC lacking knowledge about the value range of a switch. */
251#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
252
253/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
254#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
255
256/**
257 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
258 * occasion.
259 */
260#ifdef LOG_ENABLED
261# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
262 do { \
263 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
264 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
265 } while (0)
266#else
267# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
269#endif
270
271/**
272 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
273 * occasion using the supplied logger statement.
274 *
275 * @param a_LoggerArgs What to log on failure.
276 */
277#ifdef LOG_ENABLED
278# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
279 do { \
280 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
281 /*LogFunc(a_LoggerArgs);*/ \
282 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
283 } while (0)
284#else
285# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
286 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
287#endif
288
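/*
 * Illustration only (assumed usage): how the two macro families above are typically
 * combined inside an instruction helper that switches on the effective operand size:
 *
 *      switch (pVCpu->iem.s.enmEffOpSize)
 *      {
 *          case IEMMODE_16BIT: ... break;
 *          case IEMMODE_32BIT: ... break;
 *          case IEMMODE_64BIT:
 *              // Not implemented yet: hand the instruction back to the caller, with a log entry.
 *              IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("64-bit variant not implemented\n"));
 *          IEM_NOT_REACHED_DEFAULT_CASE_RET(); // keeps GCC quiet about 'may be used uninitialized'
 *      }
 */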
289/**
290 * Call an opcode decoder function.
291 *
292 * We're using macros for this so that adding and removing parameters can be
293 * done as we please. See FNIEMOP_DEF.
294 */
295#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
296
297/**
298 * Call a common opcode decoder function taking one extra argument.
299 *
300 * We're using macros for this so that adding and removing parameters can be
301 * done as we please. See FNIEMOP_DEF_1.
302 */
303#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
304
305/**
306 * Call a common opcode decoder function taking two extra arguments.
307 *
308 * We're using macros for this so that adding and removing parameters can be
309 * done as we please. See FNIEMOP_DEF_2.
310 */
311#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
312
313/**
314 * Check if we're currently executing in real or virtual 8086 mode.
315 *
316 * @returns @c true if it is, @c false if not.
317 * @param a_pVCpu The IEM state of the current CPU.
318 */
319#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
320
321/**
322 * Check if we're currently executing in virtual 8086 mode.
323 *
324 * @returns @c true if it is, @c false if not.
325 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
326 */
327#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
328
329/**
330 * Check if we're currently executing in long mode.
331 *
332 * @returns @c true if it is, @c false if not.
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
336
337/**
338 * Check if we're currently executing in real mode.
339 *
340 * @returns @c true if it is, @c false if not.
341 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
342 */
343#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
344
345/**
346 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
347 * @returns PCCPUMFEATURES
348 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
349 */
350#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
351
352/**
353 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
354 * @returns PCCPUMFEATURES
355 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
356 */
357#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
358
359/**
360 * Evaluates to true if we're presenting an Intel CPU to the guest.
361 */
362#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
363
364/**
365 * Evaluates to true if we're presenting an AMD CPU to the guest.
366 */
367#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
368
369/**
370 * Check if the address is canonical.
371 */
372#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
373
374/**
375 * Gets the effective VEX.VVVV value.
376 *
377 * The 4th bit is ignored if not 64-bit code.
378 * @returns effective V-register value.
379 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
380 */
381#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
382 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
383
384/** @def IEM_USE_UNALIGNED_DATA_ACCESS
385 * Use unaligned accesses instead of elaborate byte assembly. */
386#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
387# define IEM_USE_UNALIGNED_DATA_ACCESS
388#endif
389
390#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
391/**
392 * Check the common SVM instruction preconditions.
393 */
394# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
395 do { \
396 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
397 { \
398 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
399 return iemRaiseUndefinedOpcode(pVCpu); \
400 } \
401 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
402 { \
403 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
404 return iemRaiseUndefinedOpcode(pVCpu); \
405 } \
406 if (pVCpu->iem.s.uCpl != 0) \
407 { \
408 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
409 return iemRaiseGeneralProtectionFault0(pVCpu); \
410 } \
411 } while (0)
412
413/**
414 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
415 */
416# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
417 do { \
418 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
419 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
420 } while (0)
421
422/**
423 * Check if SVM is enabled.
424 */
425# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
426
427/**
428 * Check if an SVM control/instruction intercept is set.
429 */
430# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
431
432/**
433 * Check if an SVM read CRx intercept is set.
434 */
435# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
436
437/**
438 * Check if an SVM write CRx intercept is set.
439 */
440# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
441
442/**
443 * Check if an SVM read DRx intercept is set.
444 */
445# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
446
447/**
448 * Check if an SVM write DRx intercept is set.
449 */
450# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
451
452/**
453 * Check if an SVM exception intercept is set.
454 */
455# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
456
457/**
458 * Get the SVM pause-filter count.
459 */
460# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (CPUMGetGuestSvmPauseFilterCount(a_pVCpu, IEM_GET_CTX(a_pVCpu)))
461
462/**
463 * Invokes the SVM \#VMEXIT handler for the nested-guest.
464 */
465# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
466 do \
467 { \
468 return iemSvmVmexit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
469 } while (0)
470
471/**
472 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
473 * corresponding decode assist information.
474 */
475# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
476 do \
477 { \
478 uint64_t uExitInfo1; \
479 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
480 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
481 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
482 else \
483 uExitInfo1 = 0; \
484 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
485 } while (0)
486
487#else
488# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
489# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
490# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
491# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
492# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
493# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
494# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
495# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
496# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
497# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (0)
498# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
499# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
500
501#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
502
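/*
 * Illustration only (assumed usage; the VMMCALL intercept/exit-code pair is just a
 * placeholder): how the SVM helper macros above are meant to be combined in the
 * implementation of an SVM-sensitive instruction:
 *
 *      IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmmcall);        // EFER.SVME, CPU mode and CPL checks
 *      if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
 *      {
 *          IEM_SVM_UPDATE_NRIP(pVCpu);                     // expose NextRIP when the feature is present
 *          IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_VMMCALL, 0, 0);   // uExitInfo1=0, uExitInfo2=0
 *      }
 *      // ...otherwise carry on emulating the instruction for the nested guest.
 */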
503
504/*********************************************************************************************************************************
505* Global Variables *
506*********************************************************************************************************************************/
507extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
508
509
510/** Function table for the ADD instruction. */
511IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
512{
513 iemAImpl_add_u8, iemAImpl_add_u8_locked,
514 iemAImpl_add_u16, iemAImpl_add_u16_locked,
515 iemAImpl_add_u32, iemAImpl_add_u32_locked,
516 iemAImpl_add_u64, iemAImpl_add_u64_locked
517};
518
519/** Function table for the ADC instruction. */
520IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
521{
522 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
523 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
524 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
525 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
526};
527
528/** Function table for the SUB instruction. */
529IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
530{
531 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
532 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
533 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
534 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
535};
536
537/** Function table for the SBB instruction. */
538IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
539{
540 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
541 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
542 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
543 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
544};
545
546/** Function table for the OR instruction. */
547IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
548{
549 iemAImpl_or_u8, iemAImpl_or_u8_locked,
550 iemAImpl_or_u16, iemAImpl_or_u16_locked,
551 iemAImpl_or_u32, iemAImpl_or_u32_locked,
552 iemAImpl_or_u64, iemAImpl_or_u64_locked
553};
554
555/** Function table for the XOR instruction. */
556IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
557{
558 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
559 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
560 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
561 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
562};
563
564/** Function table for the AND instruction. */
565IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
566{
567 iemAImpl_and_u8, iemAImpl_and_u8_locked,
568 iemAImpl_and_u16, iemAImpl_and_u16_locked,
569 iemAImpl_and_u32, iemAImpl_and_u32_locked,
570 iemAImpl_and_u64, iemAImpl_and_u64_locked
571};
572
573/** Function table for the CMP instruction.
574 * @remarks Making operand order ASSUMPTIONS.
575 */
576IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
577{
578 iemAImpl_cmp_u8, NULL,
579 iemAImpl_cmp_u16, NULL,
580 iemAImpl_cmp_u32, NULL,
581 iemAImpl_cmp_u64, NULL
582};
583
584/** Function table for the TEST instruction.
585 * @remarks Making operand order ASSUMPTIONS.
586 */
587IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
588{
589 iemAImpl_test_u8, NULL,
590 iemAImpl_test_u16, NULL,
591 iemAImpl_test_u32, NULL,
592 iemAImpl_test_u64, NULL
593};
594
595/** Function table for the BT instruction. */
596IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
597{
598 NULL, NULL,
599 iemAImpl_bt_u16, NULL,
600 iemAImpl_bt_u32, NULL,
601 iemAImpl_bt_u64, NULL
602};
603
604/** Function table for the BTC instruction. */
605IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
606{
607 NULL, NULL,
608 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
609 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
610 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
611};
612
613/** Function table for the BTR instruction. */
614IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
615{
616 NULL, NULL,
617 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
618 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
619 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
620};
621
622/** Function table for the BTS instruction. */
623IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
624{
625 NULL, NULL,
626 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
627 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
628 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
629};
630
631/** Function table for the BSF instruction. */
632IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
633{
634 NULL, NULL,
635 iemAImpl_bsf_u16, NULL,
636 iemAImpl_bsf_u32, NULL,
637 iemAImpl_bsf_u64, NULL
638};
639
640/** Function table for the BSR instruction. */
641IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
642{
643 NULL, NULL,
644 iemAImpl_bsr_u16, NULL,
645 iemAImpl_bsr_u32, NULL,
646 iemAImpl_bsr_u64, NULL
647};
648
649/** Function table for the IMUL instruction. */
650IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
651{
652 NULL, NULL,
653 iemAImpl_imul_two_u16, NULL,
654 iemAImpl_imul_two_u32, NULL,
655 iemAImpl_imul_two_u64, NULL
656};
657
658/** Group 1 /r lookup table. */
659IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
660{
661 &g_iemAImpl_add,
662 &g_iemAImpl_or,
663 &g_iemAImpl_adc,
664 &g_iemAImpl_sbb,
665 &g_iemAImpl_and,
666 &g_iemAImpl_sub,
667 &g_iemAImpl_xor,
668 &g_iemAImpl_cmp
669};
670
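/*
 * Illustration only (assumed usage; bRm stands for the ModR/M byte fetched by the
 * decoder): the group 1 opcodes (0x80..0x83) select the actual operation through the
 * reg (/r) field of the ModR/M byte, which is what the table above is indexed by:
 *
 *      PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 *      // /0=ADD /1=OR /2=ADC /3=SBB /4=AND /5=SUB /6=XOR /7=CMP
 */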
671/** Function table for the INC instruction. */
672IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
673{
674 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
675 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
676 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
677 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
678};
679
680/** Function table for the DEC instruction. */
681IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
682{
683 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
684 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
685 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
686 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
687};
688
689/** Function table for the NEG instruction. */
690IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
691{
692 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
693 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
694 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
695 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
696};
697
698/** Function table for the NOT instruction. */
699IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
700{
701 iemAImpl_not_u8, iemAImpl_not_u8_locked,
702 iemAImpl_not_u16, iemAImpl_not_u16_locked,
703 iemAImpl_not_u32, iemAImpl_not_u32_locked,
704 iemAImpl_not_u64, iemAImpl_not_u64_locked
705};
706
707
708/** Function table for the ROL instruction. */
709IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
710{
711 iemAImpl_rol_u8,
712 iemAImpl_rol_u16,
713 iemAImpl_rol_u32,
714 iemAImpl_rol_u64
715};
716
717/** Function table for the ROR instruction. */
718IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
719{
720 iemAImpl_ror_u8,
721 iemAImpl_ror_u16,
722 iemAImpl_ror_u32,
723 iemAImpl_ror_u64
724};
725
726/** Function table for the RCL instruction. */
727IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
728{
729 iemAImpl_rcl_u8,
730 iemAImpl_rcl_u16,
731 iemAImpl_rcl_u32,
732 iemAImpl_rcl_u64
733};
734
735/** Function table for the RCR instruction. */
736IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
737{
738 iemAImpl_rcr_u8,
739 iemAImpl_rcr_u16,
740 iemAImpl_rcr_u32,
741 iemAImpl_rcr_u64
742};
743
744/** Function table for the SHL instruction. */
745IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
746{
747 iemAImpl_shl_u8,
748 iemAImpl_shl_u16,
749 iemAImpl_shl_u32,
750 iemAImpl_shl_u64
751};
752
753/** Function table for the SHR instruction. */
754IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
755{
756 iemAImpl_shr_u8,
757 iemAImpl_shr_u16,
758 iemAImpl_shr_u32,
759 iemAImpl_shr_u64
760};
761
762/** Function table for the SAR instruction. */
763IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
764{
765 iemAImpl_sar_u8,
766 iemAImpl_sar_u16,
767 iemAImpl_sar_u32,
768 iemAImpl_sar_u64
769};
770
771
772/** Function table for the MUL instruction. */
773IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
774{
775 iemAImpl_mul_u8,
776 iemAImpl_mul_u16,
777 iemAImpl_mul_u32,
778 iemAImpl_mul_u64
779};
780
781/** Function table for the IMUL instruction working implicitly on rAX. */
782IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
783{
784 iemAImpl_imul_u8,
785 iemAImpl_imul_u16,
786 iemAImpl_imul_u32,
787 iemAImpl_imul_u64
788};
789
790/** Function table for the DIV instruction. */
791IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
792{
793 iemAImpl_div_u8,
794 iemAImpl_div_u16,
795 iemAImpl_div_u32,
796 iemAImpl_div_u64
797};
798
799/** Function table for the IDIV instruction. */
800IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
801{
802 iemAImpl_idiv_u8,
803 iemAImpl_idiv_u16,
804 iemAImpl_idiv_u32,
805 iemAImpl_idiv_u64
806};
807
808/** Function table for the SHLD instruction */
809IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
810{
811 iemAImpl_shld_u16,
812 iemAImpl_shld_u32,
813 iemAImpl_shld_u64,
814};
815
816/** Function table for the SHRD instruction */
817IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
818{
819 iemAImpl_shrd_u16,
820 iemAImpl_shrd_u32,
821 iemAImpl_shrd_u64,
822};
823
824
825/** Function table for the PUNPCKLBW instruction */
826IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
827/** Function table for the PUNPCKLWD instruction */
828IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
829/** Function table for the PUNPCKLDQ instruction */
830IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
831/** Function table for the PUNPCKLQDQ instruction */
832IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
833
834/** Function table for the PUNPCKHBW instruction */
835IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
836/** Function table for the PUNPCKHWD instruction */
837IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
838/** Function table for the PUNPCKHDQ instruction */
839IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
840/** Function table for the PUNPCKHQDQ instruction */
841IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
842
843/** Function table for the PXOR instruction */
844IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
845/** Function table for the PCMPEQB instruction */
846IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
847/** Function table for the PCMPEQW instruction */
848IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
849/** Function table for the PCMPEQD instruction */
850IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
851
852
853#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
854/** What IEM just wrote. */
855uint8_t g_abIemWrote[256];
856/** How much IEM just wrote. */
857size_t g_cbIemWrote;
858#endif
859
860
861/*********************************************************************************************************************************
862* Internal Functions *
863*********************************************************************************************************************************/
864IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
865IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
866IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
867IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
868/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
869IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
870IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
871IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
872IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
873IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
874IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
875IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
876IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
877IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
878IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
879IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
880IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
881#ifdef IEM_WITH_SETJMP
882DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
883DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
884DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
885DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
886DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
887#endif
888
889IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
890IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
891IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
892IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
893IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
894IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
895IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
896IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
897IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
898IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
899IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
900IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
901IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
902IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
903IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
904IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
905IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
906
907#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
908IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
909#endif
910IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
911IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
912
913#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
914IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
915 uint64_t uExitInfo2);
916IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
917 uint32_t uErr, uint64_t uCr2);
918#endif
919
920/**
921 * Sets the pass up status.
922 *
923 * @returns VINF_SUCCESS.
924 * @param pVCpu The cross context virtual CPU structure of the
925 * calling thread.
926 * @param rcPassUp The pass up status. Must be informational.
927 * VINF_SUCCESS is not allowed.
928 */
929IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
930{
931 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
932
933 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
934 if (rcOldPassUp == VINF_SUCCESS)
935 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
936 /* If both are EM scheduling codes, use EM priority rules. */
937 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
938 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
939 {
940 if (rcPassUp < rcOldPassUp)
941 {
942 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
943 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
944 }
945 else
946 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
947 }
948 /* Override EM scheduling with specific status code. */
949 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
950 {
951 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
952 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
953 }
954 /* Don't override specific status code, first come first served. */
955 else
956 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
957 return VINF_SUCCESS;
958}
959
960
961/**
962 * Calculates the CPU mode.
963 *
964 * This is mainly for updating IEMCPU::enmCpuMode.
965 *
966 * @returns CPU mode.
967 * @param pCtx The register context for the CPU.
968 */
969DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
970{
971 if (CPUMIsGuestIn64BitCodeEx(pCtx))
972 return IEMMODE_64BIT;
973 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
974 return IEMMODE_32BIT;
975 return IEMMODE_16BIT;
976}
977
978
979/**
980 * Initializes the execution state.
981 *
982 * @param pVCpu The cross context virtual CPU structure of the
983 * calling thread.
984 * @param fBypassHandlers Whether to bypass access handlers.
985 *
986 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
987 * side-effects in strict builds.
988 */
989DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
990{
991 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
992
993 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
994
995#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
996 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
997 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
998 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
999 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1000 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1001 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1002 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1003 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1004#endif
1005
1006#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1007 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1008#endif
1009 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1010 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1011#ifdef VBOX_STRICT
1012 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1013 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1014 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1015 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1016 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1017 pVCpu->iem.s.uRexReg = 127;
1018 pVCpu->iem.s.uRexB = 127;
1019 pVCpu->iem.s.uRexIndex = 127;
1020 pVCpu->iem.s.iEffSeg = 127;
1021 pVCpu->iem.s.idxPrefix = 127;
1022 pVCpu->iem.s.uVex3rdReg = 127;
1023 pVCpu->iem.s.uVexLength = 127;
1024 pVCpu->iem.s.fEvexStuff = 127;
1025 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1026# ifdef IEM_WITH_CODE_TLB
1027 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1028 pVCpu->iem.s.pbInstrBuf = NULL;
1029 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1030 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1031 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1032 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1033# else
1034 pVCpu->iem.s.offOpcode = 127;
1035 pVCpu->iem.s.cbOpcode = 127;
1036# endif
1037#endif
1038
1039 pVCpu->iem.s.cActiveMappings = 0;
1040 pVCpu->iem.s.iNextMapping = 0;
1041 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1042 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1043#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1044 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1045 && pCtx->cs.u64Base == 0
1046 && pCtx->cs.u32Limit == UINT32_MAX
1047 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1048 if (!pVCpu->iem.s.fInPatchCode)
1049 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1050#endif
1051
1052#ifdef IEM_VERIFICATION_MODE_FULL
1053 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1054 pVCpu->iem.s.fNoRem = true;
1055#endif
1056}
1057
1058#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1059/**
1060 * Performs a minimal reinitialization of the execution state.
1061 *
1062 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1063 * 'world-switch' type operations on the CPU. Currently only nested
1064 * hardware-virtualization uses it.
1065 *
1066 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1067 */
1068IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1069{
1070 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1071 IEMMODE const enmMode = iemCalcCpuMode(pCtx);
1072 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1073
1074 pVCpu->iem.s.uCpl = uCpl;
1075 pVCpu->iem.s.enmCpuMode = enmMode;
1076 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1077 pVCpu->iem.s.enmEffAddrMode = enmMode;
1078 if (enmMode != IEMMODE_64BIT)
1079 {
1080 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1081 pVCpu->iem.s.enmEffOpSize = enmMode;
1082 }
1083 else
1084 {
1085 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1086 pVCpu->iem.s.enmEffOpSize = enmMode;
1087 }
1088 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1089#ifndef IEM_WITH_CODE_TLB
1090 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1091 pVCpu->iem.s.offOpcode = 0;
1092 pVCpu->iem.s.cbOpcode = 0;
1093#endif
1094}
1095#endif
1096
1097/**
1098 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1099 *
1100 * @param pVCpu The cross context virtual CPU structure of the
1101 * calling thread.
1102 */
1103DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1104{
1105 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1106#ifdef IEM_VERIFICATION_MODE_FULL
1107 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1108#endif
1109#ifdef VBOX_STRICT
1110# ifdef IEM_WITH_CODE_TLB
1111 NOREF(pVCpu);
1112# else
1113 pVCpu->iem.s.cbOpcode = 0;
1114# endif
1115#else
1116 NOREF(pVCpu);
1117#endif
1118}
1119
1120
1121/**
1122 * Initializes the decoder state.
1123 *
1124 * iemReInitDecoder is mostly a copy of this function.
1125 *
1126 * @param pVCpu The cross context virtual CPU structure of the
1127 * calling thread.
1128 * @param fBypassHandlers Whether to bypass access handlers.
1129 */
1130DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1131{
1132 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1133
1134 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1135
1136#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1137 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1138 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1139 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1140 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1141 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1142 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1143 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1144 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1145#endif
1146
1147#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1148 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1149#endif
1150 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1151#ifdef IEM_VERIFICATION_MODE_FULL
1152 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1153 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1154#endif
1155 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1156 pVCpu->iem.s.enmCpuMode = enmMode;
1157 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1158 pVCpu->iem.s.enmEffAddrMode = enmMode;
1159 if (enmMode != IEMMODE_64BIT)
1160 {
1161 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1162 pVCpu->iem.s.enmEffOpSize = enmMode;
1163 }
1164 else
1165 {
1166 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1167 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1168 }
1169 pVCpu->iem.s.fPrefixes = 0;
1170 pVCpu->iem.s.uRexReg = 0;
1171 pVCpu->iem.s.uRexB = 0;
1172 pVCpu->iem.s.uRexIndex = 0;
1173 pVCpu->iem.s.idxPrefix = 0;
1174 pVCpu->iem.s.uVex3rdReg = 0;
1175 pVCpu->iem.s.uVexLength = 0;
1176 pVCpu->iem.s.fEvexStuff = 0;
1177 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1178#ifdef IEM_WITH_CODE_TLB
1179 pVCpu->iem.s.pbInstrBuf = NULL;
1180 pVCpu->iem.s.offInstrNextByte = 0;
1181 pVCpu->iem.s.offCurInstrStart = 0;
1182# ifdef VBOX_STRICT
1183 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1184 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1185 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1186# endif
1187#else
1188 pVCpu->iem.s.offOpcode = 0;
1189 pVCpu->iem.s.cbOpcode = 0;
1190#endif
1191 pVCpu->iem.s.cActiveMappings = 0;
1192 pVCpu->iem.s.iNextMapping = 0;
1193 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1194 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1195#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1196 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1197 && pCtx->cs.u64Base == 0
1198 && pCtx->cs.u32Limit == UINT32_MAX
1199 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1200 if (!pVCpu->iem.s.fInPatchCode)
1201 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1202#endif
1203
1204#ifdef DBGFTRACE_ENABLED
1205 switch (enmMode)
1206 {
1207 case IEMMODE_64BIT:
1208 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1209 break;
1210 case IEMMODE_32BIT:
1211 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1212 break;
1213 case IEMMODE_16BIT:
1214 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1215 break;
1216 }
1217#endif
1218}
1219
1220
1221/**
1222 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1223 *
1224 * This is mostly a copy of iemInitDecoder.
1225 *
1226 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1227 */
1228DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1229{
1230 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1231
1232 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1233
1234#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1235 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1236 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1237 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1238 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1239 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1240 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1241 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1242 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1243#endif
1244
1245 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1246#ifdef IEM_VERIFICATION_MODE_FULL
1247 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1248 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1249#endif
1250 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1251 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1252 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1253 pVCpu->iem.s.enmEffAddrMode = enmMode;
1254 if (enmMode != IEMMODE_64BIT)
1255 {
1256 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1257 pVCpu->iem.s.enmEffOpSize = enmMode;
1258 }
1259 else
1260 {
1261 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1262 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1263 }
1264 pVCpu->iem.s.fPrefixes = 0;
1265 pVCpu->iem.s.uRexReg = 0;
1266 pVCpu->iem.s.uRexB = 0;
1267 pVCpu->iem.s.uRexIndex = 0;
1268 pVCpu->iem.s.idxPrefix = 0;
1269 pVCpu->iem.s.uVex3rdReg = 0;
1270 pVCpu->iem.s.uVexLength = 0;
1271 pVCpu->iem.s.fEvexStuff = 0;
1272 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1273#ifdef IEM_WITH_CODE_TLB
1274 if (pVCpu->iem.s.pbInstrBuf)
1275 {
1276 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1277 - pVCpu->iem.s.uInstrBufPc;
1278 if (off < pVCpu->iem.s.cbInstrBufTotal)
1279 {
1280 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1281 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1282 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1283 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1284 else
1285 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1286 }
1287 else
1288 {
1289 pVCpu->iem.s.pbInstrBuf = NULL;
1290 pVCpu->iem.s.offInstrNextByte = 0;
1291 pVCpu->iem.s.offCurInstrStart = 0;
1292 pVCpu->iem.s.cbInstrBuf = 0;
1293 pVCpu->iem.s.cbInstrBufTotal = 0;
1294 }
1295 }
1296 else
1297 {
1298 pVCpu->iem.s.offInstrNextByte = 0;
1299 pVCpu->iem.s.offCurInstrStart = 0;
1300 pVCpu->iem.s.cbInstrBuf = 0;
1301 pVCpu->iem.s.cbInstrBufTotal = 0;
1302 }
1303#else
1304 pVCpu->iem.s.cbOpcode = 0;
1305 pVCpu->iem.s.offOpcode = 0;
1306#endif
1307 Assert(pVCpu->iem.s.cActiveMappings == 0);
1308 pVCpu->iem.s.iNextMapping = 0;
1309 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1310 Assert(pVCpu->iem.s.fBypassHandlers == false);
1311#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1312 if (!pVCpu->iem.s.fInPatchCode)
1313 { /* likely */ }
1314 else
1315 {
1316 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1317 && pCtx->cs.u64Base == 0
1318 && pCtx->cs.u32Limit == UINT32_MAX
1319 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1320 if (!pVCpu->iem.s.fInPatchCode)
1321 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1322 }
1323#endif
1324
1325#ifdef DBGFTRACE_ENABLED
1326 switch (enmMode)
1327 {
1328 case IEMMODE_64BIT:
1329 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1330 break;
1331 case IEMMODE_32BIT:
1332 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1333 break;
1334 case IEMMODE_16BIT:
1335 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1336 break;
1337 }
1338#endif
1339}
1340
1341
1342
1343/**
1344 * Prefetches opcodes the first time execution is started.
1345 *
1346 * @returns Strict VBox status code.
1347 * @param pVCpu The cross context virtual CPU structure of the
1348 * calling thread.
1349 * @param fBypassHandlers Whether to bypass access handlers.
1350 */
1351IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1352{
1353#ifdef IEM_VERIFICATION_MODE_FULL
1354 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1355#endif
1356 iemInitDecoder(pVCpu, fBypassHandlers);
1357
1358#ifdef IEM_WITH_CODE_TLB
1359 /** @todo Do ITLB lookup here. */
1360
1361#else /* !IEM_WITH_CODE_TLB */
1362
1363 /*
1364 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1365 *
1366 * First translate CS:rIP to a physical address.
1367 */
1368 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1369 uint32_t cbToTryRead;
1370 RTGCPTR GCPtrPC;
1371 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1372 {
1373 cbToTryRead = PAGE_SIZE;
1374 GCPtrPC = pCtx->rip;
1375 if (IEM_IS_CANONICAL(GCPtrPC))
1376 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1377 else
1378 return iemRaiseGeneralProtectionFault0(pVCpu);
1379 }
1380 else
1381 {
1382 uint32_t GCPtrPC32 = pCtx->eip;
1383 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1384 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1385 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1386 else
1387 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1388 if (cbToTryRead) { /* likely */ }
1389 else /* overflowed */
1390 {
1391 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1392 cbToTryRead = UINT32_MAX;
1393 }
1394 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1395 Assert(GCPtrPC <= UINT32_MAX);
1396 }
1397
1398# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1399 /* Allow interpretation of patch manager code blocks since they can for
1400 instance throw #PFs for perfectly good reasons. */
1401 if (pVCpu->iem.s.fInPatchCode)
1402 {
1403 size_t cbRead = 0;
1404 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1405 AssertRCReturn(rc, rc);
1406 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1407 return VINF_SUCCESS;
1408 }
1409# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1410
1411 RTGCPHYS GCPhys;
1412 uint64_t fFlags;
1413 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1414 if (RT_SUCCESS(rc)) { /* probable */ }
1415 else
1416 {
1417 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1418 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1419 }
1420 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1421 else
1422 {
1423 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1424 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1425 }
1426 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1427 else
1428 {
1429 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1430 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1431 }
1432 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1433 /** @todo Check reserved bits and such stuff. PGM is better at doing
1434 * that, so do it when implementing the guest virtual address
1435 * TLB... */
1436
1437# ifdef IEM_VERIFICATION_MODE_FULL
1438 /*
1439 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1440 * instruction.
1441 */
1442 /** @todo optimize this differently by not using PGMPhysRead. */
1443 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1444 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1445 if ( offPrevOpcodes < cbOldOpcodes
1446 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1447 {
1448 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1449 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1450 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1451 pVCpu->iem.s.cbOpcode = cbNew;
1452 return VINF_SUCCESS;
1453 }
1454# endif
1455
1456 /*
1457 * Read the bytes at this address.
1458 */
1459 PVM pVM = pVCpu->CTX_SUFF(pVM);
1460# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1461 size_t cbActual;
1462 if ( PATMIsEnabled(pVM)
1463 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1464 {
1465 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1466 Assert(cbActual > 0);
1467 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1468 }
1469 else
1470# endif
1471 {
1472 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1473 if (cbToTryRead > cbLeftOnPage)
1474 cbToTryRead = cbLeftOnPage;
1475 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1476 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1477
1478 if (!pVCpu->iem.s.fBypassHandlers)
1479 {
1480 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1481 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1482 { /* likely */ }
1483 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1484 {
1485 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1486                      GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1487 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1488 }
1489 else
1490 {
1491 Log((RT_SUCCESS(rcStrict)
1492 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1493 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1494                  GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1495 return rcStrict;
1496 }
1497 }
1498 else
1499 {
1500 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1501 if (RT_SUCCESS(rc))
1502 { /* likely */ }
1503 else
1504 {
1505 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1506                      GCPtrPC, GCPhys, cbToTryRead, rc));
1507 return rc;
1508 }
1509 }
1510 pVCpu->iem.s.cbOpcode = cbToTryRead;
1511 }
1512#endif /* !IEM_WITH_CODE_TLB */
1513 return VINF_SUCCESS;
1514}
1515
1516
1517/**
1518 * Invalidates the IEM TLBs.
1519 *
1520 * This is called internally as well as by PGM when moving GC mappings.
1521 *
1522 *
1523 * @param pVCpu The cross context virtual CPU structure of the calling
1524 * thread.
1525 * @param fVmm Set when PGM calls us with a remapping.
1526 */
1527VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1528{
1529#ifdef IEM_WITH_CODE_TLB
1530 pVCpu->iem.s.cbInstrBufTotal = 0;
1531 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1532 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1533 { /* very likely */ }
1534 else
1535 {
1536 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1537 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1538 while (i-- > 0)
1539 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1540 }
1541#endif
1542
1543#ifdef IEM_WITH_DATA_TLB
1544 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1545 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1546 { /* very likely */ }
1547 else
1548 {
1549 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1550 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1551 while (i-- > 0)
1552 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1553 }
1554#endif
1555 NOREF(pVCpu); NOREF(fVmm);
1556}
1557
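/*
 * A minimal, self-contained sketch of the revision trick used above (made-up
 * names, standard C only, not part of the build): a full flush does not clear
 * the entries, it just bumps the revision value kept in the upper tag bits,
 * which makes every previously stored tag compare unequal.  Only on the very
 * rare wrap-around to zero do the tags get scrubbed for real.  The toy model
 * assumes page numbers fit in the low 32 bits.
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>
# include <string.h>

# define MODEL_TLB_ENTRIES  256
# define MODEL_REV_INCR     UINT64_C(0x100000000) /* revision lives above the (toy) page number bits */

typedef struct MODELTLB
{
    uint64_t uRevision;
    uint64_t auTag[MODEL_TLB_ENTRIES];
} MODELTLB;

static void modelTlbInit(MODELTLB *pTlb)
{
    memset(pTlb, 0, sizeof(*pTlb));
    pTlb->uRevision = MODEL_REV_INCR;       /* the revision is never zero, so a zeroed tag never matches */
}

static void modelTlbInvalidateAll(MODELTLB *pTlb)
{
    pTlb->uRevision += MODEL_REV_INCR;
    if (pTlb->uRevision == 0)               /* wrap-around: now the tags really must be scrubbed */
    {
        pTlb->uRevision = MODEL_REV_INCR;
        memset(pTlb->auTag, 0, sizeof(pTlb->auTag));
    }
}

static int modelTlbIsHit(MODELTLB const *pTlb, uint64_t uPageNo)
{
    uint64_t const uTag = uPageNo | pTlb->uRevision;    /* same tag construction as the lookup code */
    return pTlb->auTag[(uint8_t)uPageNo] == uTag;       /* entries are indexed by the low 8 bits */
}
#endif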
1558
1559/**
1560 * Invalidates a page in the TLBs.
1561 *
1562 * @param pVCpu The cross context virtual CPU structure of the calling
1563 * thread.
1564 * @param GCPtr The address of the page to invalidate
1565 */
1566VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1567{
1568#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1569 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1570 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1571 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1572 uintptr_t idx = (uint8_t)GCPtr;
1573
1574# ifdef IEM_WITH_CODE_TLB
1575 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1576 {
1577 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1578 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1579 pVCpu->iem.s.cbInstrBufTotal = 0;
1580 }
1581# endif
1582
1583# ifdef IEM_WITH_DATA_TLB
1584 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1585 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1586# endif
1587#else
1588 NOREF(pVCpu); NOREF(GCPtr);
1589#endif
1590}
1591
1592
1593/**
1594 * Invalidates the host physical aspects of the IEM TLBs.
1595 *
1596 * This is called internally as well as by PGM when moving GC mappings.
1597 *
1598 * @param pVCpu The cross context virtual CPU structure of the calling
1599 * thread.
1600 */
1601VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1602{
1603#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1604    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1605
1606# ifdef IEM_WITH_CODE_TLB
1607 pVCpu->iem.s.cbInstrBufTotal = 0;
1608# endif
1609 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1610 if (uTlbPhysRev != 0)
1611 {
1612 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1613 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1614 }
1615 else
1616 {
1617 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1618 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1619
1620 unsigned i;
1621# ifdef IEM_WITH_CODE_TLB
1622 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1623 while (i-- > 0)
1624 {
1625 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1626 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1627 }
1628# endif
1629# ifdef IEM_WITH_DATA_TLB
1630 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1631 while (i-- > 0)
1632 {
1633 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1634 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1635 }
1636# endif
1637 }
1638#else
1639 NOREF(pVCpu);
1640#endif
1641}
1642
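/*
 * A tiny sketch (made-up names, standard C only, not part of the build) of why
 * the physical revision shares a 64-bit word with the per-entry flag bits: the
 * revision occupies bits the flags never use, so one masked compare verifies
 * both that the cached physical info is current and that no "needs special
 * handling" flag is set.
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>

# define MODEL_F_PG_NO_READ    UINT64_C(0x0001)
# define MODEL_F_NO_MAPPINGR3  UINT64_C(0x0002)
# define MODEL_F_PHYS_REV      UINT64_C(0xffffffffffff0000) /* revision bits, disjoint from the flags */

static int modelCanUseDirectMapping(uint64_t fFlagsAndPhysRev, uint64_t uCurPhysRev)
{
    /* Equal only if the revision bits match uCurPhysRev AND both flag bits are clear. */
    return (fFlagsAndPhysRev & (MODEL_F_PHYS_REV | MODEL_F_NO_MAPPINGR3 | MODEL_F_PG_NO_READ)) == uCurPhysRev;
}
#endif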
1643
1644/**
1645 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1646 *
1647 * This is called internally as well as by PGM when moving GC mappings.
1648 *
1649 * @param pVM The cross context VM structure.
1650 *
1651 * @remarks Caller holds the PGM lock.
1652 */
1653VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1654{
1655 RT_NOREF_PV(pVM);
1656}
1657
1658#ifdef IEM_WITH_CODE_TLB
1659
1660/**
1661 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1662 * failure and jumping.
1663 *
1664 * We end up here for a number of reasons:
1665 * - pbInstrBuf isn't yet initialized.
1666 * - Advancing beyond the buffer boundary (e.g. cross page).
1667 * - Advancing beyond the CS segment limit.
1668 * - Fetching from non-mappable page (e.g. MMIO).
1669 *
1670 * @param pVCpu The cross context virtual CPU structure of the
1671 * calling thread.
1672 * @param pvDst Where to return the bytes.
1673 * @param cbDst Number of bytes to read.
1674 *
1675 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1676 */
1677IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1678{
1679#ifdef IN_RING3
1680//__debugbreak();
1681 for (;;)
1682 {
1683 Assert(cbDst <= 8);
1684 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1685
1686 /*
1687 * We might have a partial buffer match, deal with that first to make the
1688 * rest simpler. This is the first part of the cross page/buffer case.
1689 */
1690 if (pVCpu->iem.s.pbInstrBuf != NULL)
1691 {
1692 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1693 {
1694 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1695 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1696 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1697
1698 cbDst -= cbCopy;
1699 pvDst = (uint8_t *)pvDst + cbCopy;
1700 offBuf += cbCopy;
1701 pVCpu->iem.s.offInstrNextByte += offBuf;
1702 }
1703 }
1704
1705 /*
1706 * Check segment limit, figuring how much we're allowed to access at this point.
1707 *
1708 * We will fault immediately if RIP is past the segment limit / in non-canonical
1709 * territory. If we do continue, there are one or more bytes to read before we
1710 * end up in trouble and we need to do that first before faulting.
1711 */
1712 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1713 RTGCPTR GCPtrFirst;
1714 uint32_t cbMaxRead;
1715 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1716 {
1717 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1718 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1719 { /* likely */ }
1720 else
1721 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1722 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1723 }
1724 else
1725 {
1726 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1727 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1728 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1729 { /* likely */ }
1730 else
1731 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1732 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1733 if (cbMaxRead != 0)
1734 { /* likely */ }
1735 else
1736 {
1737 /* Overflowed because address is 0 and limit is max. */
1738 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1739 cbMaxRead = X86_PAGE_SIZE;
1740 }
1741 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1742 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1743 if (cbMaxRead2 < cbMaxRead)
1744 cbMaxRead = cbMaxRead2;
1745 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1746 }
1747
1748 /*
1749 * Get the TLB entry for this piece of code.
1750 */
1751 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1752 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1753 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1754 if (pTlbe->uTag == uTag)
1755 {
1756 /* likely when executing lots of code, otherwise unlikely */
1757# ifdef VBOX_WITH_STATISTICS
1758 pVCpu->iem.s.CodeTlb.cTlbHits++;
1759# endif
1760 }
1761 else
1762 {
1763 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1764# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1765 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1766 {
1767 pTlbe->uTag = uTag;
1768 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1769                                     | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1770 pTlbe->GCPhys = NIL_RTGCPHYS;
1771 pTlbe->pbMappingR3 = NULL;
1772 }
1773 else
1774# endif
1775 {
1776 RTGCPHYS GCPhys;
1777 uint64_t fFlags;
1778 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1779 if (RT_FAILURE(rc))
1780 {
1781 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1782 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1783 }
1784
1785 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1786 pTlbe->uTag = uTag;
1787 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1788 pTlbe->GCPhys = GCPhys;
1789 pTlbe->pbMappingR3 = NULL;
1790 }
1791 }
1792
1793 /*
1794 * Check TLB page table level access flags.
1795 */
1796 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1797 {
1798 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1799 {
1800 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1801 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1802 }
1803 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1804 {
1805 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1806 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1807 }
1808 }
1809
1810# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1811 /*
1812 * Allow interpretation of patch manager code blocks since they can for
1813 * instance throw #PFs for perfectly good reasons.
1814 */
1815 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1816        { /* likely */ }
1817 else
1818 {
1819            /** @todo This could be optimized a little in ring-3 if we liked. */
1820 size_t cbRead = 0;
1821 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1822 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1823 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1824 return;
1825 }
1826# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1827
1828 /*
1829 * Look up the physical page info if necessary.
1830 */
1831 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1832 { /* not necessary */ }
1833 else
1834 {
1835 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1836 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1837 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1838 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1839 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1840 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1841 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1842 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1843 }
1844
1845# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1846 /*
1847 * Try do a direct read using the pbMappingR3 pointer.
1848 */
1849 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1850 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1851 {
1852 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1853 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1854 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1855 {
1856 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1857 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1858 }
1859 else
1860 {
1861 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1862 Assert(cbInstr < cbMaxRead);
1863 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1864 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1865 }
1866 if (cbDst <= cbMaxRead)
1867 {
1868 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1869 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1870 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1871 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1872 return;
1873 }
1874 pVCpu->iem.s.pbInstrBuf = NULL;
1875
1876 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1877 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1878 }
1879 else
1880# endif
1881#if 0
1882 /*
1883     * If there is no special read handling, we can read a bit more and
1884 * put it in the prefetch buffer.
1885 */
1886 if ( cbDst < cbMaxRead
1887 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1888 {
1889 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1890 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1891 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1892 { /* likely */ }
1893 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1894 {
1895 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1896                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1897 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1898             AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1899 }
1900 else
1901 {
1902 Log((RT_SUCCESS(rcStrict)
1903 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1904 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1905                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1906 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1907 }
1908 }
1909 /*
1910 * Special read handling, so only read exactly what's needed.
1911 * This is a highly unlikely scenario.
1912 */
1913 else
1914#endif
1915 {
1916 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1917 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1918 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1919 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1920 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1921 { /* likely */ }
1922 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1923 {
1924 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1925                  GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1926 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1927 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1928 }
1929 else
1930 {
1931 Log((RT_SUCCESS(rcStrict)
1932 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1933 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1934                  GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1935 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1936 }
1937 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1938 if (cbToRead == cbDst)
1939 return;
1940 }
1941
1942 /*
1943 * More to read, loop.
1944 */
1945 cbDst -= cbMaxRead;
1946 pvDst = (uint8_t *)pvDst + cbMaxRead;
1947 }
1948#else
1949 RT_NOREF(pvDst, cbDst);
1950 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1951#endif
1952}
1953
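/*
 * A minimal model (made-up names, standard C only, not part of the build) of
 * the loop structure above: drain whatever is still in the current buffer,
 * then keep reading chunks that never cross a page boundary until the request
 * is satisfied, so a fetch straddling a page boundary simply takes two (or
 * more) iterations.  The page reader callback is assumed to read exactly the
 * requested byte count or to bail out (longjmp in the real code).
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>
# include <stddef.h>

# define MODEL_PAGE_SIZE 4096u

typedef void (*PFNMODELREADPAGE)(uint64_t uAddr, void *pvDst, uint32_t cb);

static void modelFetchBytes(uint64_t uAddr, void *pvDst, size_t cbDst, PFNMODELREADPAGE pfnReadPage)
{
    uint8_t *pbDst = (uint8_t *)pvDst;
    while (cbDst > 0)
    {
        uint32_t const cbLeftOnPage = MODEL_PAGE_SIZE - (uint32_t)(uAddr & (MODEL_PAGE_SIZE - 1));
        uint32_t const cbThis       = cbDst < cbLeftOnPage ? (uint32_t)cbDst : cbLeftOnPage;
        pfnReadPage(uAddr, pbDst, cbThis);  /* at most one page per iteration */
        pbDst += cbThis;
        uAddr += cbThis;
        cbDst -= cbThis;
    }
}
#endif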
1954#else
1955
1956/**
1957 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1958 * exception if it fails.
1959 *
1960 * @returns Strict VBox status code.
1961 * @param pVCpu The cross context virtual CPU structure of the
1962 * calling thread.
1963 * @param cbMin The minimum number of bytes relative to offOpcode
1964 * that must be read.
1965 */
1966IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1967{
1968 /*
1969 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1970 *
1971 * First translate CS:rIP to a physical address.
1972 */
1973 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1974 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1975 uint32_t cbToTryRead;
1976 RTGCPTR GCPtrNext;
1977 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1978 {
1979 cbToTryRead = PAGE_SIZE;
1980 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1981 if (!IEM_IS_CANONICAL(GCPtrNext))
1982 return iemRaiseGeneralProtectionFault0(pVCpu);
1983 }
1984 else
1985 {
1986 uint32_t GCPtrNext32 = pCtx->eip;
1987 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1988 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1989 if (GCPtrNext32 > pCtx->cs.u32Limit)
1990 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1991 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1992 if (!cbToTryRead) /* overflowed */
1993 {
1994 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1995 cbToTryRead = UINT32_MAX;
1996 /** @todo check out wrapping around the code segment. */
1997 }
1998 if (cbToTryRead < cbMin - cbLeft)
1999 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2000 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
2001 }
2002
2003 /* Only read up to the end of the page, and make sure we don't read more
2004 than the opcode buffer can hold. */
2005 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2006 if (cbToTryRead > cbLeftOnPage)
2007 cbToTryRead = cbLeftOnPage;
2008 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2009 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2010/** @todo r=bird: Convert assertion into undefined opcode exception? */
2011 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2012
2013# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2014 /* Allow interpretation of patch manager code blocks since they can for
2015 instance throw #PFs for perfectly good reasons. */
2016 if (pVCpu->iem.s.fInPatchCode)
2017 {
2018 size_t cbRead = 0;
2019 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2020 AssertRCReturn(rc, rc);
2021 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2022 return VINF_SUCCESS;
2023 }
2024# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2025
2026 RTGCPHYS GCPhys;
2027 uint64_t fFlags;
2028 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2029 if (RT_FAILURE(rc))
2030 {
2031 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2032 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2033 }
2034 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2035 {
2036 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2037 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2038 }
2039 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2040 {
2041 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2042 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2043 }
2044 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2045 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2046 /** @todo Check reserved bits and such stuff. PGM is better at doing
2047 * that, so do it when implementing the guest virtual address
2048 * TLB... */
2049
2050 /*
2051 * Read the bytes at this address.
2052 *
2053 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2054 * and since PATM should only patch the start of an instruction there
2055 * should be no need to check again here.
2056 */
2057 if (!pVCpu->iem.s.fBypassHandlers)
2058 {
2059 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2060 cbToTryRead, PGMACCESSORIGIN_IEM);
2061 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2062 { /* likely */ }
2063 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2064 {
2065 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2066                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2067 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2068 }
2069 else
2070 {
2071 Log((RT_SUCCESS(rcStrict)
2072 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2073 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2074                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2075 return rcStrict;
2076 }
2077 }
2078 else
2079 {
2080 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2081 if (RT_SUCCESS(rc))
2082 { /* likely */ }
2083 else
2084 {
2085 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2086 return rc;
2087 }
2088 }
2089 pVCpu->iem.s.cbOpcode += cbToTryRead;
2090 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2091
2092 return VINF_SUCCESS;
2093}
2094
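/*
 * A small sketch (made-up names, standard C only, not part of the build) of the
 * segment limit arithmetic used above for the non-64-bit case: with the bounds
 * check (offIntoSeg <= uSegLimit) already done, the number of fetchable bytes
 * is uSegLimit - offIntoSeg + 1, and the only way that wraps to zero is
 * offIntoSeg == 0 with a 4 GiB limit, which is why zero is treated as "no
 * effective limit".
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>

static uint32_t modelBytesLeftInCodeSegment(uint32_t offIntoSeg, uint32_t uSegLimit)
{
    /* Caller guarantees offIntoSeg <= uSegLimit (otherwise it already raised a fault). */
    uint32_t cbLeft = uSegLimit - offIntoSeg + 1;   /* the limit is inclusive */
    if (cbLeft == 0)                                /* offIntoSeg == 0 && uSegLimit == UINT32_MAX */
        cbLeft = UINT32_MAX;
    return cbLeft;
}
#endif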
2095#endif /* !IEM_WITH_CODE_TLB */
2096#ifndef IEM_WITH_SETJMP
2097
2098/**
2099 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2100 *
2101 * @returns Strict VBox status code.
2102 * @param pVCpu The cross context virtual CPU structure of the
2103 * calling thread.
2104 * @param pb Where to return the opcode byte.
2105 */
2106DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2107{
2108 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2109 if (rcStrict == VINF_SUCCESS)
2110 {
2111 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2112 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2113 pVCpu->iem.s.offOpcode = offOpcode + 1;
2114 }
2115 else
2116 *pb = 0;
2117 return rcStrict;
2118}
2119
2120
2121/**
2122 * Fetches the next opcode byte.
2123 *
2124 * @returns Strict VBox status code.
2125 * @param pVCpu The cross context virtual CPU structure of the
2126 * calling thread.
2127 * @param pu8 Where to return the opcode byte.
2128 */
2129DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2130{
2131 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2132 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2133 {
2134 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2135 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2136 return VINF_SUCCESS;
2137 }
2138 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2139}
2140
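/*
 * The fetchers above follow a deliberate fast/slow split: a tiny DECLINLINE
 * fast path handling only the "bytes already buffered" case, and a
 * DECL_NO_INLINE slow path for refills and faults, keeping the hot decoder
 * loop compact.  Generic shape of that pattern in plain C (made-up names,
 * not part of the build):
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>

typedef struct MODELDECODER
{
    uint8_t abBuf[16];
    uint8_t off;
    uint8_t cb;
} MODELDECODER;

/* Out of line: in the real code this refills the buffer and can raise faults;
   here it just reports exhaustion. */
static int modelGetByteSlow(MODELDECODER *pThis, uint8_t *pb)
{
    (void)pThis;
    *pb = 0;
    return -1;
}

/* Inlined: only the common "already buffered" case. */
static inline int modelGetByte(MODELDECODER *pThis, uint8_t *pb)
{
    if (pThis->off < pThis->cb)
    {
        *pb = pThis->abBuf[pThis->off++];
        return 0;
    }
    return modelGetByteSlow(pThis, pb);
}
#endif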
2141#else /* IEM_WITH_SETJMP */
2142
2143/**
2144 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2145 *
2146 * @returns The opcode byte.
2147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2148 */
2149DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2150{
2151# ifdef IEM_WITH_CODE_TLB
2152 uint8_t u8;
2153 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2154 return u8;
2155# else
2156 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2157 if (rcStrict == VINF_SUCCESS)
2158 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2159 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2160# endif
2161}
2162
2163
2164/**
2165 * Fetches the next opcode byte, longjmp on error.
2166 *
2167 * @returns The opcode byte.
2168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2169 */
2170DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2171{
2172# ifdef IEM_WITH_CODE_TLB
2173 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2174 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2175 if (RT_LIKELY( pbBuf != NULL
2176 && offBuf < pVCpu->iem.s.cbInstrBuf))
2177 {
2178 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2179 return pbBuf[offBuf];
2180 }
2181# else
2182 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2183 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2184 {
2185 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2186 return pVCpu->iem.s.abOpcode[offOpcode];
2187 }
2188# endif
2189 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2190}
2191
2192#endif /* IEM_WITH_SETJMP */
2193
2194/**
2195 * Fetches the next opcode byte, returns automatically on failure.
2196 *
2197 * @param a_pu8 Where to return the opcode byte.
2198 * @remark Implicitly references pVCpu.
2199 */
2200#ifndef IEM_WITH_SETJMP
2201# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2202 do \
2203 { \
2204 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2205 if (rcStrict2 == VINF_SUCCESS) \
2206 { /* likely */ } \
2207 else \
2208 return rcStrict2; \
2209 } while (0)
2210#else
2211# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2212#endif /* IEM_WITH_SETJMP */
2213
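/*
 * Hedged usage sketch (hypothetical helper, not part of the real decoder)
 * showing why IEM_OPCODE_GET_NEXT_U8 has to be a macro: in the status-code
 * build it expands to a call plus an early 'return rcStrict2', so it may only
 * be used in functions returning VBOXSTRICTRC, while in the setjmp build it is
 * a plain assignment and fetch errors unwind via longjmp instead.
 */
#if 0 /* illustrative sketch only */
IEM_STATIC VBOXSTRICTRC iemOpExampleFetchModRmByte(PVMCPU pVCpu, uint8_t *pbRm)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);   /* returns (non-setjmp) or longjmps (setjmp) on fetch failure */
    *pbRm = bRm;
    return VINF_SUCCESS;
}
#endif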
2214
2215#ifndef IEM_WITH_SETJMP
2216/**
2217 * Fetches the next signed byte from the opcode stream.
2218 *
2219 * @returns Strict VBox status code.
2220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2221 * @param pi8 Where to return the signed byte.
2222 */
2223DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2224{
2225 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2226}
2227#endif /* !IEM_WITH_SETJMP */
2228
2229
2230/**
2231 * Fetches the next signed byte from the opcode stream, returning automatically
2232 * on failure.
2233 *
2234 * @param a_pi8 Where to return the signed byte.
2235 * @remark Implicitly references pVCpu.
2236 */
2237#ifndef IEM_WITH_SETJMP
2238# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2239 do \
2240 { \
2241 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2242 if (rcStrict2 != VINF_SUCCESS) \
2243 return rcStrict2; \
2244 } while (0)
2245#else /* IEM_WITH_SETJMP */
2246# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2247
2248#endif /* IEM_WITH_SETJMP */
2249
2250#ifndef IEM_WITH_SETJMP
2251
2252/**
2253 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2254 *
2255 * @returns Strict VBox status code.
2256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2257 * @param pu16 Where to return the unsigned word.
2258 */
2259DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2260{
2261 uint8_t u8;
2262 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2263 if (rcStrict == VINF_SUCCESS)
2264 *pu16 = (int8_t)u8;
2265 return rcStrict;
2266}
2267
2268
2269/**
2270 * Fetches the next signed byte from the opcode stream, extending it to
2271 * unsigned 16-bit.
2272 *
2273 * @returns Strict VBox status code.
2274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2275 * @param pu16 Where to return the unsigned word.
2276 */
2277DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2278{
2279 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2280 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2281 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2282
2283 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2284 pVCpu->iem.s.offOpcode = offOpcode + 1;
2285 return VINF_SUCCESS;
2286}
2287
2288#endif /* !IEM_WITH_SETJMP */
2289
2290/**
2291 * Fetches the next signed byte from the opcode stream, sign-extending it to
2292 * a word, returning automatically on failure.
2293 *
2294 * @param a_pu16 Where to return the word.
2295 * @remark Implicitly references pVCpu.
2296 */
2297#ifndef IEM_WITH_SETJMP
2298# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2299 do \
2300 { \
2301 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2302 if (rcStrict2 != VINF_SUCCESS) \
2303 return rcStrict2; \
2304 } while (0)
2305#else
2306# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2307#endif
2308
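/*
 * The '(int8_t)' casts in the fetchers above are what perform the sign
 * extension: the byte is reinterpreted as a signed 8-bit value and then
 * implicitly widened, so 0xFF becomes 0xFFFF (or all-ones in wider types)
 * rather than 0x00FF.  Tiny self-contained illustration (standard C only,
 * not part of the build):
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>
# include <assert.h>

static void modelSignExtendByte(void)
{
    uint8_t  u8  = 0xFF;                            /* -1 when viewed as a signed byte */
    uint16_t u16 = (uint16_t)(int8_t)u8;            /* sign-extend, then convert to unsigned 16-bit */
    uint64_t u64 = (uint64_t)(int64_t)(int8_t)u8;   /* same idea widened all the way to 64 bits */
    assert(u16 == UINT16_C(0xFFFF));
    assert(u64 == UINT64_MAX);
}
#endif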
2309#ifndef IEM_WITH_SETJMP
2310
2311/**
2312 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2313 *
2314 * @returns Strict VBox status code.
2315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2316 * @param pu32 Where to return the opcode dword.
2317 */
2318DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2319{
2320 uint8_t u8;
2321 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2322 if (rcStrict == VINF_SUCCESS)
2323 *pu32 = (int8_t)u8;
2324 return rcStrict;
2325}
2326
2327
2328/**
2329 * Fetches the next signed byte from the opcode stream, extending it to
2330 * unsigned 32-bit.
2331 *
2332 * @returns Strict VBox status code.
2333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2334 * @param pu32 Where to return the unsigned dword.
2335 */
2336DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2337{
2338 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2339 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2340 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2341
2342 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2343 pVCpu->iem.s.offOpcode = offOpcode + 1;
2344 return VINF_SUCCESS;
2345}
2346
2347#endif /* !IEM_WITH_SETJMP */
2348
2349/**
2350 * Fetches the next signed byte from the opcode stream, sign-extending it to
2351 * a double word, returning automatically on failure.
2352 *
2353 * @param a_pu32 Where to return the double word.
2354 * @remark Implicitly references pVCpu.
2355 */
2356#ifndef IEM_WITH_SETJMP
2357# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2358 do \
2359 { \
2360 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2361 if (rcStrict2 != VINF_SUCCESS) \
2362 return rcStrict2; \
2363 } while (0)
2364#else
2365# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2366#endif
2367
2368#ifndef IEM_WITH_SETJMP
2369
2370/**
2371 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2372 *
2373 * @returns Strict VBox status code.
2374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2375 * @param pu64 Where to return the opcode qword.
2376 */
2377DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2378{
2379 uint8_t u8;
2380 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2381 if (rcStrict == VINF_SUCCESS)
2382 *pu64 = (int8_t)u8;
2383 return rcStrict;
2384}
2385
2386
2387/**
2388 * Fetches the next signed byte from the opcode stream, extending it to
2389 * unsigned 64-bit.
2390 *
2391 * @returns Strict VBox status code.
2392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2393 * @param pu64 Where to return the unsigned qword.
2394 */
2395DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2396{
2397 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2398 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2399 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2400
2401 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2402 pVCpu->iem.s.offOpcode = offOpcode + 1;
2403 return VINF_SUCCESS;
2404}
2405
2406#endif /* !IEM_WITH_SETJMP */
2407
2408
2409/**
2410 * Fetches the next signed byte from the opcode stream, sign-extending it to
2411 * a quad word, returning automatically on failure.
2412 *
2413 * @param a_pu64 Where to return the quad word.
2414 * @remark Implicitly references pVCpu.
2415 */
2416#ifndef IEM_WITH_SETJMP
2417# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2418 do \
2419 { \
2420 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2421 if (rcStrict2 != VINF_SUCCESS) \
2422 return rcStrict2; \
2423 } while (0)
2424#else
2425# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2426#endif
2427
2428
2429#ifndef IEM_WITH_SETJMP
2430
2431/**
2432 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2433 *
2434 * @returns Strict VBox status code.
2435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2436 * @param pu16 Where to return the opcode word.
2437 */
2438DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2439{
2440 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2441 if (rcStrict == VINF_SUCCESS)
2442 {
2443 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2444# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2445 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2446# else
2447 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2448# endif
2449 pVCpu->iem.s.offOpcode = offOpcode + 2;
2450 }
2451 else
2452 *pu16 = 0;
2453 return rcStrict;
2454}
2455
2456
2457/**
2458 * Fetches the next opcode word.
2459 *
2460 * @returns Strict VBox status code.
2461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2462 * @param pu16 Where to return the opcode word.
2463 */
2464DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2465{
2466 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2467 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2468 {
2469 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2470# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2471 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2472# else
2473 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2474# endif
2475 return VINF_SUCCESS;
2476 }
2477 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2478}
2479
2480#else /* IEM_WITH_SETJMP */
2481
2482/**
2483 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2484 *
2485 * @returns The opcode word.
2486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2487 */
2488DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2489{
2490# ifdef IEM_WITH_CODE_TLB
2491 uint16_t u16;
2492 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2493 return u16;
2494# else
2495 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2496 if (rcStrict == VINF_SUCCESS)
2497 {
2498 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2499 pVCpu->iem.s.offOpcode += 2;
2500# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2501 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2502# else
2503 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2504# endif
2505 }
2506 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2507# endif
2508}
2509
2510
2511/**
2512 * Fetches the next opcode word, longjmp on error.
2513 *
2514 * @returns The opcode word.
2515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2516 */
2517DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2518{
2519# ifdef IEM_WITH_CODE_TLB
2520 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2521 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2522 if (RT_LIKELY( pbBuf != NULL
2523 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2524 {
2525 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2526# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2527 return *(uint16_t const *)&pbBuf[offBuf];
2528# else
2529 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2530# endif
2531 }
2532# else
2533 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2534 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2535 {
2536 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2537# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2538 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2539# else
2540 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2541# endif
2542 }
2543# endif
2544 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2545}
2546
2547#endif /* IEM_WITH_SETJMP */
2548
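/*
 * The IEM_USE_UNALIGNED_DATA_ACCESS paths above assume a little-endian host
 * such as x86/AMD64, so a raw load and the byte-by-byte RT_MAKE_U16 /
 * RT_MAKE_U32_FROM_U8 composition yield the same value.  Minimal equivalence
 * sketch (standard C only, not part of the build):
 */
#if 0 /* illustrative sketch only */
# include <stdint.h>
# include <string.h>
# include <assert.h>

static void modelLittleEndianImmediate(void)
{
    uint8_t const abOpcode[2] = { 0x34, 0x12 };     /* the immediate 0x1234 as it sits in the instruction stream */
    uint16_t uComposed = (uint16_t)(abOpcode[0] | ((uint16_t)abOpcode[1] << 8));
    uint16_t uLoaded;
    memcpy(&uLoaded, abOpcode, sizeof(uLoaded));    /* alignment-safe load */
    assert(uComposed == UINT16_C(0x1234));
    assert(uLoaded == uComposed);                   /* holds on a little-endian host */
}
#endif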
2549
2550/**
2551 * Fetches the next opcode word, returns automatically on failure.
2552 *
2553 * @param a_pu16 Where to return the opcode word.
2554 * @remark Implicitly references pVCpu.
2555 */
2556#ifndef IEM_WITH_SETJMP
2557# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2558 do \
2559 { \
2560 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2561 if (rcStrict2 != VINF_SUCCESS) \
2562 return rcStrict2; \
2563 } while (0)
2564#else
2565# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2566#endif
2567
2568#ifndef IEM_WITH_SETJMP
2569
2570/**
2571 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2572 *
2573 * @returns Strict VBox status code.
2574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2575 * @param pu32 Where to return the opcode double word.
2576 */
2577DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2578{
2579 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2580 if (rcStrict == VINF_SUCCESS)
2581 {
2582 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2583 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2584 pVCpu->iem.s.offOpcode = offOpcode + 2;
2585 }
2586 else
2587 *pu32 = 0;
2588 return rcStrict;
2589}
2590
2591
2592/**
2593 * Fetches the next opcode word, zero extending it to a double word.
2594 *
2595 * @returns Strict VBox status code.
2596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2597 * @param pu32 Where to return the opcode double word.
2598 */
2599DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2600{
2601 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2602 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2603 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2604
2605 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2606 pVCpu->iem.s.offOpcode = offOpcode + 2;
2607 return VINF_SUCCESS;
2608}
2609
2610#endif /* !IEM_WITH_SETJMP */
2611
2612
2613/**
2614 * Fetches the next opcode word and zero extends it to a double word, returns
2615 * automatically on failure.
2616 *
2617 * @param a_pu32 Where to return the opcode double word.
2618 * @remark Implicitly references pVCpu.
2619 */
2620#ifndef IEM_WITH_SETJMP
2621# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2622 do \
2623 { \
2624 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2625 if (rcStrict2 != VINF_SUCCESS) \
2626 return rcStrict2; \
2627 } while (0)
2628#else
2629# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2630#endif
2631
2632#ifndef IEM_WITH_SETJMP
2633
2634/**
2635 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2636 *
2637 * @returns Strict VBox status code.
2638 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2639 * @param pu64 Where to return the opcode quad word.
2640 */
2641DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2642{
2643 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2644 if (rcStrict == VINF_SUCCESS)
2645 {
2646 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2647 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2648 pVCpu->iem.s.offOpcode = offOpcode + 2;
2649 }
2650 else
2651 *pu64 = 0;
2652 return rcStrict;
2653}
2654
2655
2656/**
2657 * Fetches the next opcode word, zero extending it to a quad word.
2658 *
2659 * @returns Strict VBox status code.
2660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2661 * @param pu64 Where to return the opcode quad word.
2662 */
2663DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2664{
2665 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2666 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2667 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2668
2669 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2670 pVCpu->iem.s.offOpcode = offOpcode + 2;
2671 return VINF_SUCCESS;
2672}
2673
2674#endif /* !IEM_WITH_SETJMP */
2675
2676/**
2677 * Fetches the next opcode word and zero extends it to a quad word, returns
2678 * automatically on failure.
2679 *
2680 * @param a_pu64 Where to return the opcode quad word.
2681 * @remark Implicitly references pVCpu.
2682 */
2683#ifndef IEM_WITH_SETJMP
2684# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2685 do \
2686 { \
2687 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2688 if (rcStrict2 != VINF_SUCCESS) \
2689 return rcStrict2; \
2690 } while (0)
2691#else
2692# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2693#endif
2694
2695
2696#ifndef IEM_WITH_SETJMP
2697/**
2698 * Fetches the next signed word from the opcode stream.
2699 *
2700 * @returns Strict VBox status code.
2701 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2702 * @param pi16 Where to return the signed word.
2703 */
2704DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2705{
2706 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2707}
2708#endif /* !IEM_WITH_SETJMP */
2709
2710
2711/**
2712 * Fetches the next signed word from the opcode stream, returning automatically
2713 * on failure.
2714 *
2715 * @param a_pi16 Where to return the signed word.
2716 * @remark Implicitly references pVCpu.
2717 */
2718#ifndef IEM_WITH_SETJMP
2719# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2720 do \
2721 { \
2722 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2723 if (rcStrict2 != VINF_SUCCESS) \
2724 return rcStrict2; \
2725 } while (0)
2726#else
2727# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2728#endif
2729
2730#ifndef IEM_WITH_SETJMP
2731
2732/**
2733 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2734 *
2735 * @returns Strict VBox status code.
2736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2737 * @param pu32 Where to return the opcode dword.
2738 */
2739DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2740{
2741 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2742 if (rcStrict == VINF_SUCCESS)
2743 {
2744 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2745# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2746 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2747# else
2748 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2749 pVCpu->iem.s.abOpcode[offOpcode + 1],
2750 pVCpu->iem.s.abOpcode[offOpcode + 2],
2751 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2752# endif
2753 pVCpu->iem.s.offOpcode = offOpcode + 4;
2754 }
2755 else
2756 *pu32 = 0;
2757 return rcStrict;
2758}
2759
2760
2761/**
2762 * Fetches the next opcode dword.
2763 *
2764 * @returns Strict VBox status code.
2765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2766 * @param pu32 Where to return the opcode double word.
2767 */
2768DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2769{
2770 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2771 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2772 {
2773 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2774# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2775 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2776# else
2777 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2778 pVCpu->iem.s.abOpcode[offOpcode + 1],
2779 pVCpu->iem.s.abOpcode[offOpcode + 2],
2780 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2781# endif
2782 return VINF_SUCCESS;
2783 }
2784 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2785}
2786
2787#else /* IEM_WITH_SETJMP */
2788
2789/**
2790 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2791 *
2792 * @returns The opcode dword.
2793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2794 */
2795DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2796{
2797# ifdef IEM_WITH_CODE_TLB
2798 uint32_t u32;
2799 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2800 return u32;
2801# else
2802 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2803 if (rcStrict == VINF_SUCCESS)
2804 {
2805 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2806 pVCpu->iem.s.offOpcode = offOpcode + 4;
2807# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2808 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2809# else
2810 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2811 pVCpu->iem.s.abOpcode[offOpcode + 1],
2812 pVCpu->iem.s.abOpcode[offOpcode + 2],
2813 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2814# endif
2815 }
2816 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2817# endif
2818}
2819
2820
2821/**
2822 * Fetches the next opcode dword, longjmp on error.
2823 *
2824 * @returns The opcode dword.
2825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2826 */
2827DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2828{
2829# ifdef IEM_WITH_CODE_TLB
2830 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2831 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2832 if (RT_LIKELY( pbBuf != NULL
2833 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2834 {
2835 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2836# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2837 return *(uint32_t const *)&pbBuf[offBuf];
2838# else
2839 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2840 pbBuf[offBuf + 1],
2841 pbBuf[offBuf + 2],
2842 pbBuf[offBuf + 3]);
2843# endif
2844 }
2845# else
2846 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2847 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2848 {
2849 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2850# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2851 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2852# else
2853 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2854 pVCpu->iem.s.abOpcode[offOpcode + 1],
2855 pVCpu->iem.s.abOpcode[offOpcode + 2],
2856 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2857# endif
2858 }
2859# endif
2860 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2861}
2862
2863#endif /* IEM_WITH_SETJMP */
2864
2865
2866/**
2867 * Fetches the next opcode dword, returns automatically on failure.
2868 *
2869 * @param a_pu32 Where to return the opcode dword.
2870 * @remark Implicitly references pVCpu.
2871 */
2872#ifndef IEM_WITH_SETJMP
2873# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2874 do \
2875 { \
2876 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2877 if (rcStrict2 != VINF_SUCCESS) \
2878 return rcStrict2; \
2879 } while (0)
2880#else
2881# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2882#endif
2883
2884#ifndef IEM_WITH_SETJMP
2885
2886/**
2887 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2888 *
2889 * @returns Strict VBox status code.
2890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2891 * @param pu64 Where to return the opcode quad word.
2892 */
2893DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2894{
2895 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2896 if (rcStrict == VINF_SUCCESS)
2897 {
2898 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2899 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2900 pVCpu->iem.s.abOpcode[offOpcode + 1],
2901 pVCpu->iem.s.abOpcode[offOpcode + 2],
2902 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2903 pVCpu->iem.s.offOpcode = offOpcode + 4;
2904 }
2905 else
2906 *pu64 = 0;
2907 return rcStrict;
2908}
2909
2910
2911/**
2912 * Fetches the next opcode dword, zero extending it to a quad word.
2913 *
2914 * @returns Strict VBox status code.
2915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2916 * @param pu64 Where to return the opcode quad word.
2917 */
2918DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2919{
2920 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2921 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2922 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2923
2924 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2925 pVCpu->iem.s.abOpcode[offOpcode + 1],
2926 pVCpu->iem.s.abOpcode[offOpcode + 2],
2927 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2928 pVCpu->iem.s.offOpcode = offOpcode + 4;
2929 return VINF_SUCCESS;
2930}
2931
2932#endif /* !IEM_WITH_SETJMP */
2933
2934
2935/**
2936 * Fetches the next opcode dword and zero extends it to a quad word, returns
2937 * automatically on failure.
2938 *
2939 * @param a_pu64 Where to return the opcode quad word.
2940 * @remark Implicitly references pVCpu.
2941 */
2942#ifndef IEM_WITH_SETJMP
2943# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2944 do \
2945 { \
2946 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2947 if (rcStrict2 != VINF_SUCCESS) \
2948 return rcStrict2; \
2949 } while (0)
2950#else
2951# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2952#endif
2953
2954
2955#ifndef IEM_WITH_SETJMP
2956/**
2957 * Fetches the next signed double word from the opcode stream.
2958 *
2959 * @returns Strict VBox status code.
2960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2961 * @param pi32 Where to return the signed double word.
2962 */
2963DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2964{
2965 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2966}
2967#endif
2968
2969/**
2970 * Fetches the next signed double word from the opcode stream, returning
2971 * automatically on failure.
2972 *
2973 * @param a_pi32 Where to return the signed double word.
2974 * @remark Implicitly references pVCpu.
2975 */
2976#ifndef IEM_WITH_SETJMP
2977# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2978 do \
2979 { \
2980 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2981 if (rcStrict2 != VINF_SUCCESS) \
2982 return rcStrict2; \
2983 } while (0)
2984#else
2985# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2986#endif
2987
2988#ifndef IEM_WITH_SETJMP
2989
2990/**
2991 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2992 *
2993 * @returns Strict VBox status code.
2994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2995 * @param pu64 Where to return the opcode qword.
2996 */
2997DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2998{
2999 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3000 if (rcStrict == VINF_SUCCESS)
3001 {
3002 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3003 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3004 pVCpu->iem.s.abOpcode[offOpcode + 1],
3005 pVCpu->iem.s.abOpcode[offOpcode + 2],
3006 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3007 pVCpu->iem.s.offOpcode = offOpcode + 4;
3008 }
3009 else
3010 *pu64 = 0;
3011 return rcStrict;
3012}
3013
3014
3015/**
3016 * Fetches the next opcode dword, sign extending it into a quad word.
3017 *
3018 * @returns Strict VBox status code.
3019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3020 * @param pu64 Where to return the opcode quad word.
3021 */
3022DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3023{
3024 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3025 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3026 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3027
3028 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3029 pVCpu->iem.s.abOpcode[offOpcode + 1],
3030 pVCpu->iem.s.abOpcode[offOpcode + 2],
3031 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3032 *pu64 = i32;
3033 pVCpu->iem.s.offOpcode = offOpcode + 4;
3034 return VINF_SUCCESS;
3035}
3036
3037#endif /* !IEM_WITH_SETJMP */
3038
3039
3040/**
3041 * Fetches the next opcode double word and sign extends it to a quad word,
3042 * returns automatically on failure.
3043 *
3044 * @param a_pu64 Where to return the opcode quad word.
3045 * @remark Implicitly references pVCpu.
3046 */
3047#ifndef IEM_WITH_SETJMP
3048# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3049 do \
3050 { \
3051 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3052 if (rcStrict2 != VINF_SUCCESS) \
3053 return rcStrict2; \
3054 } while (0)
3055#else
3056# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3057#endif
3058
3059#ifndef IEM_WITH_SETJMP
3060
3061/**
3062 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3063 *
3064 * @returns Strict VBox status code.
3065 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3066 * @param pu64 Where to return the opcode qword.
3067 */
3068DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3069{
3070 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3071 if (rcStrict == VINF_SUCCESS)
3072 {
3073 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3074# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3075 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3076# else
3077 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3078 pVCpu->iem.s.abOpcode[offOpcode + 1],
3079 pVCpu->iem.s.abOpcode[offOpcode + 2],
3080 pVCpu->iem.s.abOpcode[offOpcode + 3],
3081 pVCpu->iem.s.abOpcode[offOpcode + 4],
3082 pVCpu->iem.s.abOpcode[offOpcode + 5],
3083 pVCpu->iem.s.abOpcode[offOpcode + 6],
3084 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3085# endif
3086 pVCpu->iem.s.offOpcode = offOpcode + 8;
3087 }
3088 else
3089 *pu64 = 0;
3090 return rcStrict;
3091}
3092
3093
3094/**
3095 * Fetches the next opcode qword.
3096 *
3097 * @returns Strict VBox status code.
3098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3099 * @param pu64 Where to return the opcode qword.
3100 */
3101DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3102{
3103 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3104 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3105 {
3106# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3107 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3108# else
3109 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3110 pVCpu->iem.s.abOpcode[offOpcode + 1],
3111 pVCpu->iem.s.abOpcode[offOpcode + 2],
3112 pVCpu->iem.s.abOpcode[offOpcode + 3],
3113 pVCpu->iem.s.abOpcode[offOpcode + 4],
3114 pVCpu->iem.s.abOpcode[offOpcode + 5],
3115 pVCpu->iem.s.abOpcode[offOpcode + 6],
3116 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3117# endif
3118 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3119 return VINF_SUCCESS;
3120 }
3121 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3122}
3123
3124#else /* IEM_WITH_SETJMP */
3125
3126/**
3127 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3128 *
3129 * @returns The opcode qword.
3130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3131 */
3132DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3133{
3134# ifdef IEM_WITH_CODE_TLB
3135 uint64_t u64;
3136 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3137 return u64;
3138# else
3139 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3140 if (rcStrict == VINF_SUCCESS)
3141 {
3142 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3143 pVCpu->iem.s.offOpcode = offOpcode + 8;
3144# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3145 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3146# else
3147 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3148 pVCpu->iem.s.abOpcode[offOpcode + 1],
3149 pVCpu->iem.s.abOpcode[offOpcode + 2],
3150 pVCpu->iem.s.abOpcode[offOpcode + 3],
3151 pVCpu->iem.s.abOpcode[offOpcode + 4],
3152 pVCpu->iem.s.abOpcode[offOpcode + 5],
3153 pVCpu->iem.s.abOpcode[offOpcode + 6],
3154 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3155# endif
3156 }
3157 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3158# endif
3159}
3160
3161
3162/**
3163 * Fetches the next opcode qword, longjmp on error.
3164 *
3165 * @returns The opcode qword.
3166 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3167 */
3168DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3169{
3170# ifdef IEM_WITH_CODE_TLB
3171 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3172 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3173 if (RT_LIKELY( pbBuf != NULL
3174 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3175 {
3176 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3177# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3178 return *(uint64_t const *)&pbBuf[offBuf];
3179# else
3180 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3181 pbBuf[offBuf + 1],
3182 pbBuf[offBuf + 2],
3183 pbBuf[offBuf + 3],
3184 pbBuf[offBuf + 4],
3185 pbBuf[offBuf + 5],
3186 pbBuf[offBuf + 6],
3187 pbBuf[offBuf + 7]);
3188# endif
3189 }
3190# else
3191 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3192 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3193 {
3194 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3195# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3196 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3197# else
3198 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3199 pVCpu->iem.s.abOpcode[offOpcode + 1],
3200 pVCpu->iem.s.abOpcode[offOpcode + 2],
3201 pVCpu->iem.s.abOpcode[offOpcode + 3],
3202 pVCpu->iem.s.abOpcode[offOpcode + 4],
3203 pVCpu->iem.s.abOpcode[offOpcode + 5],
3204 pVCpu->iem.s.abOpcode[offOpcode + 6],
3205 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3206# endif
3207 }
3208# endif
3209 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3210}
3211
3212#endif /* IEM_WITH_SETJMP */
3213
3214/**
3215 * Fetches the next opcode quad word, returns automatically on failure.
3216 *
3217 * @param a_pu64 Where to return the opcode quad word.
3218 * @remark Implicitly references pVCpu.
3219 */
3220#ifndef IEM_WITH_SETJMP
3221# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3222 do \
3223 { \
3224 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3225 if (rcStrict2 != VINF_SUCCESS) \
3226 return rcStrict2; \
3227 } while (0)
3228#else
3229# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3230#endif
3231
3232
3233/** @name Misc Worker Functions.
3234 * @{
3235 */
3236
3237/**
3238 * Gets the exception class for the specified exception vector.
3239 *
3240 * @returns The class of the specified exception.
3241 * @param uVector The exception vector.
3242 */
3243IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3244{
3245 Assert(uVector <= X86_XCPT_LAST);
3246 switch (uVector)
3247 {
3248 case X86_XCPT_DE:
3249 case X86_XCPT_TS:
3250 case X86_XCPT_NP:
3251 case X86_XCPT_SS:
3252 case X86_XCPT_GP:
3253 case X86_XCPT_SX: /* AMD only */
3254 return IEMXCPTCLASS_CONTRIBUTORY;
3255
3256 case X86_XCPT_PF:
3257 case X86_XCPT_VE: /* Intel only */
3258 return IEMXCPTCLASS_PAGE_FAULT;
3259
3260 case X86_XCPT_DF:
3261 return IEMXCPTCLASS_DOUBLE_FAULT;
3262 }
3263 return IEMXCPTCLASS_BENIGN;
3264}
3265
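/* Summary of the class combinations handled below (matches the Intel/AMD double-fault
 * rules; for orientation only):
 *
 *     previous \ current   benign    contributory    page fault
 *     benign               deliver   deliver         deliver
 *     contributory         deliver   #DF             deliver
 *     page fault           deliver   #DF             #DF
 *     double fault         deliver   triple fault    triple fault
 *
 * "deliver" means the current exception is raised normally (IEMXCPTRAISE_CURRENT_XCPT). */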
3266
3267/**
3268 * Evaluates how to handle an exception caused during delivery of another event
3269 * (exception / interrupt).
3270 *
3271 * @returns How to handle the recursive exception.
3272 * @param pVCpu The cross context virtual CPU structure of the
3273 * calling thread.
3274 * @param fPrevFlags The flags of the previous event.
3275 * @param uPrevVector The vector of the previous event.
3276 * @param fCurFlags The flags of the current exception.
3277 * @param uCurVector The vector of the current exception.
3278 * @param pfXcptRaiseInfo Where to store additional information about the
3279 * exception condition. Optional.
3280 */
3281VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3282 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3283{
3284 /*
3285 * Only CPU exceptions can be raised while delivering other events; software-interrupt
3286 * generated events (INTn/INT3/INTO/ICEBP) cannot occur as the current (second) exception.
3287 */
3288 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3289 Assert(pVCpu); RT_NOREF(pVCpu);
3290 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3291
3292 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3293 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3294 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3295 {
3296 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3297 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3298 {
3299 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3300 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3301 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3302 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3303 {
3304 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3305 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3306 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3307 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3308 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3309 }
3310 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3311 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3312 {
3313 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3314 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3315 }
3316 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3317 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3318 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3319 {
3320 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3321 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3322 }
3323 }
3324 else
3325 {
3326 if (uPrevVector == X86_XCPT_NMI)
3327 {
3328 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3329 if (uCurVector == X86_XCPT_PF)
3330 {
3331 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3332 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3333 }
3334 }
3335 else if ( uPrevVector == X86_XCPT_AC
3336 && uCurVector == X86_XCPT_AC)
3337 {
3338 enmRaise = IEMXCPTRAISE_CPU_HANG;
3339 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3340 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3341 }
3342 }
3343 }
3344 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3345 {
3346 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3347 if (uCurVector == X86_XCPT_PF)
3348 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3349 }
3350 else
3351 {
3352 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3353 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3354 }
3355
3356 if (pfXcptRaiseInfo)
3357 *pfXcptRaiseInfo = fRaiseInfo;
3358 return enmRaise;
3359}
3360
3361
3362/**
3363 * Enters the CPU shutdown state initiated by a triple fault or other
3364 * unrecoverable conditions.
3365 *
3366 * @returns Strict VBox status code.
3367 * @param pVCpu The cross context virtual CPU structure of the
3368 * calling thread.
3369 */
3370IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3371{
3372 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3373 {
3374 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3375 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3376 }
3377
3378 RT_NOREF(pVCpu);
3379 return VINF_EM_TRIPLE_FAULT;
3380}
3381
3382
3383/**
3384 * Validates a new SS segment.
3385 *
3386 * @returns VBox strict status code.
3387 * @param pVCpu The cross context virtual CPU structure of the
3388 * calling thread.
3389 * @param pCtx The CPU context.
3390 * @param NewSS The new SS selector.
3391 * @param uCpl The CPL to load the stack for.
3392 * @param pDesc Where to return the descriptor.
3393 */
3394IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3395{
3396 NOREF(pCtx);
3397
3398 /* Null selectors are not allowed (we're not called for dispatching
3399 interrupts with SS=0 in long mode). */
3400 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3401 {
3402 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3403 return iemRaiseTaskSwitchFault0(pVCpu);
3404 }
3405
3406 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3407 if ((NewSS & X86_SEL_RPL) != uCpl)
3408 {
3409 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3410 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3411 }
3412
3413 /*
3414 * Read the descriptor.
3415 */
3416 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3417 if (rcStrict != VINF_SUCCESS)
3418 return rcStrict;
3419
3420 /*
3421 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3422 */
3423 if (!pDesc->Legacy.Gen.u1DescType)
3424 {
3425 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3426 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3427 }
3428
3429 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3430 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3431 {
3432 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3433 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3434 }
3435 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3436 {
3437 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3438 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3439 }
3440
3441 /* Is it there? */
3442 /** @todo testcase: Is this checked before the canonical / limit check below? */
3443 if (!pDesc->Legacy.Gen.u1Present)
3444 {
3445 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3446 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3447 }
3448
3449 return VINF_SUCCESS;
3450}
3451
3452
3453/**
3454 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3455 * not.
3456 *
3457 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3458 * @param a_pCtx The CPU context.
3459 */
3460#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3461# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3462 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3463 ? (a_pCtx)->eflags.u \
3464 : CPUMRawGetEFlags(a_pVCpu) )
3465#else
3466# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3467 ( (a_pCtx)->eflags.u )
3468#endif
3469
3470/**
3471 * Updates the EFLAGS in the correct manner wrt. PATM.
3472 *
3473 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3474 * @param a_pCtx The CPU context.
3475 * @param a_fEfl The new EFLAGS.
3476 */
3477#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3478# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3479 do { \
3480 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3481 (a_pCtx)->eflags.u = (a_fEfl); \
3482 else \
3483 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3484 } while (0)
3485#else
3486# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3487 do { \
3488 (a_pCtx)->eflags.u = (a_fEfl); \
3489 } while (0)
3490#endif
3491
3492
3493/** @} */
3494
3495/** @name Raising Exceptions.
3496 *
3497 * @{
3498 */
3499
3500
3501/**
3502 * Loads the specified stack far pointer from the TSS.
3503 *
3504 * @returns VBox strict status code.
3505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3506 * @param pCtx The CPU context.
3507 * @param uCpl The CPL to load the stack for.
3508 * @param pSelSS Where to return the new stack segment.
3509 * @param puEsp Where to return the new stack pointer.
3510 */
3511IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3512 PRTSEL pSelSS, uint32_t *puEsp)
3513{
3514 VBOXSTRICTRC rcStrict;
3515 Assert(uCpl < 4);
3516
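    /* Layout refresher (see the X86TSS16/X86TSS32 structures): the 16-bit TSS stores
     * sp0/ss0 at offsets 2/4, sp1/ss1 at 6/8, ... (4 bytes per privilege level), while
     * the 32-bit TSS stores esp0/ss0 at offsets 4/8, esp1/ss1 at 12/16, ... (8 bytes
     * per privilege level).  Hence the uCpl * 4 + 2 and uCpl * 8 + 4 offsets below. */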
3517 switch (pCtx->tr.Attr.n.u4Type)
3518 {
3519 /*
3520 * 16-bit TSS (X86TSS16).
3521 */
3522 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3523 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3524 {
3525 uint32_t off = uCpl * 4 + 2;
3526 if (off + 4 <= pCtx->tr.u32Limit)
3527 {
3528 /** @todo check actual access pattern here. */
3529 uint32_t u32Tmp = 0; /* gcc maybe... */
3530 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3531 if (rcStrict == VINF_SUCCESS)
3532 {
3533 *puEsp = RT_LOWORD(u32Tmp);
3534 *pSelSS = RT_HIWORD(u32Tmp);
3535 return VINF_SUCCESS;
3536 }
3537 }
3538 else
3539 {
3540 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3541 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3542 }
3543 break;
3544 }
3545
3546 /*
3547 * 32-bit TSS (X86TSS32).
3548 */
3549 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3550 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3551 {
3552 uint32_t off = uCpl * 8 + 4;
3553 if (off + 7 <= pCtx->tr.u32Limit)
3554 {
3555/** @todo check actual access pattern here. */
3556 uint64_t u64Tmp;
3557 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3558 if (rcStrict == VINF_SUCCESS)
3559 {
3560 *puEsp = u64Tmp & UINT32_MAX;
3561 *pSelSS = (RTSEL)(u64Tmp >> 32);
3562 return VINF_SUCCESS;
3563 }
3564 }
3565 else
3566 {
3567 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3568 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3569 }
3570 break;
3571 }
3572
3573 default:
3574 AssertFailed();
3575 rcStrict = VERR_IEM_IPE_4;
3576 break;
3577 }
3578
3579 *puEsp = 0; /* make gcc happy */
3580 *pSelSS = 0; /* make gcc happy */
3581 return rcStrict;
3582}
3583
3584
3585/**
3586 * Loads the specified stack pointer from the 64-bit TSS.
3587 *
3588 * @returns VBox strict status code.
3589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3590 * @param pCtx The CPU context.
3591 * @param uCpl The CPL to load the stack for.
3592 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3593 * @param puRsp Where to return the new stack pointer.
3594 */
3595IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3596{
3597 Assert(uCpl < 4);
3598 Assert(uIst < 8);
3599 *puRsp = 0; /* make gcc happy */
3600
3601 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3602
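    /* The 64-bit TSS holds rsp0..rsp2 starting at offset 4, followed (after a reserved
     * qword) by ist1..ist7; both are plain qword arrays, hence the simple scaling via
     * RT_OFFSETOF below.  uIst == 0 means "no IST", i.e. use the rspN slot for uCpl. */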
3603 uint32_t off;
3604 if (uIst)
3605 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3606 else
3607 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3608 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3609 {
3610 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3611 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3612 }
3613
3614 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3615}
3616
3617
3618/**
3619 * Adjust the CPU state according to the exception being raised.
3620 *
3621 * @param pCtx The CPU context.
3622 * @param u8Vector The exception that has been raised.
3623 */
3624DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3625{
3626 switch (u8Vector)
3627 {
3628 case X86_XCPT_DB:
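            /* DR7.GD is cleared when a #DB is delivered so the debug handler can access
             * the debug registers without immediately re-faulting (general detect). */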
3629 pCtx->dr[7] &= ~X86_DR7_GD;
3630 break;
3631 /** @todo Read the AMD and Intel exception reference... */
3632 }
3633}
3634
3635
3636/**
3637 * Implements exceptions and interrupts for real mode.
3638 *
3639 * @returns VBox strict status code.
3640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3641 * @param pCtx The CPU context.
3642 * @param cbInstr The number of bytes to offset rIP by in the return
3643 * address.
3644 * @param u8Vector The interrupt / exception vector number.
3645 * @param fFlags The flags.
3646 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3647 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3648 */
3649IEM_STATIC VBOXSTRICTRC
3650iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3651 PCPUMCTX pCtx,
3652 uint8_t cbInstr,
3653 uint8_t u8Vector,
3654 uint32_t fFlags,
3655 uint16_t uErr,
3656 uint64_t uCr2)
3657{
3658 NOREF(uErr); NOREF(uCr2);
3659
3660 /*
3661 * Read the IDT entry.
3662 */
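    /* In real mode the "IDT" is the classic interrupt vector table: 4 bytes per vector,
     * offset in the low word and segment in the high word, which is why the entry is
     * fetched as a single RTFAR16 at IDTR.base + 4 * vector below. */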
3663 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3664 {
3665 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3666 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3667 }
3668 RTFAR16 Idte;
3669 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3670 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3671 {
3672 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3673 return rcStrict;
3674 }
3675
3676 /*
3677 * Push the stack frame.
3678 */
3679 uint16_t *pu16Frame;
3680 uint64_t uNewRsp;
3681 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3682 if (rcStrict != VINF_SUCCESS)
3683 return rcStrict;
3684
3685 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3686#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3687 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
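    /* 8086/V20/80186 class CPUs store FLAGS bits 12-15 as ones; setting them here makes
     * the pushed FLAGS image match what such a CPU would have pushed. */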
3688 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3689 fEfl |= UINT16_C(0xf000);
3690#endif
3691 pu16Frame[2] = (uint16_t)fEfl;
3692 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3693 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3694 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3695 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3696 return rcStrict;
3697
3698 /*
3699 * Load the vector address into cs:ip and make exception specific state
3700 * adjustments.
3701 */
3702 pCtx->cs.Sel = Idte.sel;
3703 pCtx->cs.ValidSel = Idte.sel;
3704 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3705 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3706 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3707 pCtx->rip = Idte.off;
3708 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3709 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3710
3711 /** @todo do we actually do this in real mode? */
3712 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3713 iemRaiseXcptAdjustState(pCtx, u8Vector);
3714
3715 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3716}
3717
3718
3719/**
3720 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3721 *
3722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3723 * @param pSReg Pointer to the segment register.
3724 */
3725IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3726{
3727 pSReg->Sel = 0;
3728 pSReg->ValidSel = 0;
3729 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3730 {
3731 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3732 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3733 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3734 }
3735 else
3736 {
3737 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3738 /** @todo check this on AMD-V */
3739 pSReg->u64Base = 0;
3740 pSReg->u32Limit = 0;
3741 }
3742}
3743
3744
3745/**
3746 * Loads a segment selector during a task switch in V8086 mode.
3747 *
3748 * @param pSReg Pointer to the segment register.
3749 * @param uSel The selector value to load.
3750 */
3751IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3752{
3753 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3754 pSReg->Sel = uSel;
3755 pSReg->ValidSel = uSel;
3756 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3757 pSReg->u64Base = uSel << 4;
3758 pSReg->u32Limit = 0xffff;
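    /* 0xf3 = present, DPL=3, code/data (S=1), read/write data segment, accessed --
     * the fixed attributes that V8086 mode implies for every selector. */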
3759 pSReg->Attr.u = 0xf3;
3760}
3761
3762
3763/**
3764 * Loads a NULL data selector into a selector register, both the hidden and
3765 * visible parts, in protected mode.
3766 *
3767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3768 * @param pSReg Pointer to the segment register.
3769 * @param uRpl The RPL.
3770 */
3771IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3772{
3773 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3774 * data selector in protected mode. */
3775 pSReg->Sel = uRpl;
3776 pSReg->ValidSel = uRpl;
3777 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3778 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3779 {
3780 /* VT-x (Intel 3960x) observed doing something like this. */
3781 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3782 pSReg->u32Limit = UINT32_MAX;
3783 pSReg->u64Base = 0;
3784 }
3785 else
3786 {
3787 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3788 pSReg->u32Limit = 0;
3789 pSReg->u64Base = 0;
3790 }
3791}
3792
3793
3794/**
3795 * Loads a segment selector during a task switch in protected mode.
3796 *
3797 * In this task switch scenario, we would throw \#TS exceptions rather than
3798 * \#GPs.
3799 *
3800 * @returns VBox strict status code.
3801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3802 * @param pSReg Pointer to the segment register.
3803 * @param uSel The new selector value.
3804 *
3805 * @remarks This does _not_ handle CS or SS.
3806 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3807 */
3808IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3809{
3810 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3811
3812 /* Null data selector. */
3813 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3814 {
3815 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3816 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3817 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3818 return VINF_SUCCESS;
3819 }
3820
3821 /* Fetch the descriptor. */
3822 IEMSELDESC Desc;
3823 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3824 if (rcStrict != VINF_SUCCESS)
3825 {
3826 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3827 VBOXSTRICTRC_VAL(rcStrict)));
3828 return rcStrict;
3829 }
3830
3831 /* Must be a data segment or readable code segment. */
3832 if ( !Desc.Legacy.Gen.u1DescType
3833 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3834 {
3835 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3836 Desc.Legacy.Gen.u4Type));
3837 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3838 }
3839
3840 /* Check privileges for data segments and non-conforming code segments. */
3841 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3842 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3843 {
3844 /* The RPL and the new CPL must be less than or equal to the DPL. */
3845 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3846 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3847 {
3848 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3849 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3850 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3851 }
3852 }
3853
3854 /* Is it there? */
3855 if (!Desc.Legacy.Gen.u1Present)
3856 {
3857 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3858 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3859 }
3860
3861 /* The base and limit. */
3862 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3863 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3864
3865 /*
3866 * Ok, everything checked out fine. Now set the accessed bit before
3867 * committing the result into the registers.
3868 */
3869 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3870 {
3871 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3872 if (rcStrict != VINF_SUCCESS)
3873 return rcStrict;
3874 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3875 }
3876
3877 /* Commit */
3878 pSReg->Sel = uSel;
3879 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3880 pSReg->u32Limit = cbLimit;
3881 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3882 pSReg->ValidSel = uSel;
3883 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3884 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3885 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3886
3887 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3888 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3889 return VINF_SUCCESS;
3890}
3891
3892
3893/**
3894 * Performs a task switch.
3895 *
3896 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3897 * caller is responsible for performing the necessary checks (like DPL, TSS
3898 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3899 * reference for JMP, CALL, IRET.
3900 *
3901 * If the task switch is due to a software interrupt or hardware exception,
3902 * the caller is responsible for validating the TSS selector and descriptor. See
3903 * Intel Instruction reference for INT n.
3904 *
3905 * @returns VBox strict status code.
3906 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3907 * @param pCtx The CPU context.
3908 * @param enmTaskSwitch What caused this task switch.
3909 * @param uNextEip The EIP effective after the task switch.
3910 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3911 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3912 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3913 * @param SelTSS The TSS selector of the new task.
3914 * @param pNewDescTSS Pointer to the new TSS descriptor.
3915 */
3916IEM_STATIC VBOXSTRICTRC
3917iemTaskSwitch(PVMCPU pVCpu,
3918 PCPUMCTX pCtx,
3919 IEMTASKSWITCH enmTaskSwitch,
3920 uint32_t uNextEip,
3921 uint32_t fFlags,
3922 uint16_t uErr,
3923 uint64_t uCr2,
3924 RTSEL SelTSS,
3925 PIEMSELDESC pNewDescTSS)
3926{
3927 Assert(!IEM_IS_REAL_MODE(pVCpu));
3928 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3929
3930 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3931 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3932 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3933 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3934 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3935
3936 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3937 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3938
3939 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3940 fIsNewTSS386, pCtx->eip, uNextEip));
3941
3942 /* Update CR2 in case it's a page-fault. */
3943 /** @todo This should probably be done much earlier in IEM/PGM. See
3944 * @bugref{5653#c49}. */
3945 if (fFlags & IEM_XCPT_FLAGS_CR2)
3946 pCtx->cr2 = uCr2;
3947
3948 /*
3949 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3950 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3951 */
3952 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3953 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3954 if (uNewTSSLimit < uNewTSSLimitMin)
3955 {
3956 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3957 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3958 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3959 }
3960
3961 /*
3962 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3963 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3964 */
3965 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3966 {
3967 uint32_t const uExitInfo1 = SelTSS;
3968 uint32_t uExitInfo2 = uErr;
3969 switch (enmTaskSwitch)
3970 {
3971 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3972 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3973 default: break;
3974 }
3975 if (fFlags & IEM_XCPT_FLAGS_ERR)
3976 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3977 if (pCtx->eflags.Bits.u1RF)
3978 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3979
3980 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3981 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3982 RT_NOREF2(uExitInfo1, uExitInfo2);
3983 }
3984 /** @todo Nested-VMX task-switch intercept. */
3985
3986 /*
3987 * Check the current TSS limit. The last bytes written to the current TSS during the
3988 * task switch are the 2 bytes at offset 0x5C (32-bit) and the 1 byte at offset 0x28 (16-bit).
3989 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3990 *
3991 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3992 * end up with smaller than "legal" TSS limits.
3993 */
3994 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3995 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3996 if (uCurTSSLimit < uCurTSSLimitMin)
3997 {
3998 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3999 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4000 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4001 }
4002
4003 /*
4004 * Verify that the new TSS can be accessed and map it. Map only the required contents
4005 * and not the entire TSS.
4006 */
4007 void *pvNewTSS;
4008 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4009 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4010 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4011 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4012 * not perform correct translation if this happens. See Intel spec. 7.2.1
4013 * "Task-State Segment" */
4014 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4015 if (rcStrict != VINF_SUCCESS)
4016 {
4017 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4018 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4019 return rcStrict;
4020 }
4021
4022 /*
4023 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4024 */
4025 uint32_t u32EFlags = pCtx->eflags.u32;
4026 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4027 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4028 {
4029 PX86DESC pDescCurTSS;
4030 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4031 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4032 if (rcStrict != VINF_SUCCESS)
4033 {
4034 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4035 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4036 return rcStrict;
4037 }
4038
4039 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4040 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4041 if (rcStrict != VINF_SUCCESS)
4042 {
4043 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4044 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4045 return rcStrict;
4046 }
4047
4048 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4049 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4050 {
4051 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4052 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4053 u32EFlags &= ~X86_EFL_NT;
4054 }
4055 }
4056
4057 /*
4058 * Save the CPU state into the current TSS.
4059 */
4060 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4061 if (GCPtrNewTSS == GCPtrCurTSS)
4062 {
4063 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4064 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4065 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4066 }
4067 if (fIsNewTSS386)
4068 {
4069 /*
4070 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4071 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4072 */
4073 void *pvCurTSS32;
4074 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4075 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4076 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4077 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4078 if (rcStrict != VINF_SUCCESS)
4079 {
4080 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4081 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4082 return rcStrict;
4083 }
4084
4085 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4086 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4087 pCurTSS32->eip = uNextEip;
4088 pCurTSS32->eflags = u32EFlags;
4089 pCurTSS32->eax = pCtx->eax;
4090 pCurTSS32->ecx = pCtx->ecx;
4091 pCurTSS32->edx = pCtx->edx;
4092 pCurTSS32->ebx = pCtx->ebx;
4093 pCurTSS32->esp = pCtx->esp;
4094 pCurTSS32->ebp = pCtx->ebp;
4095 pCurTSS32->esi = pCtx->esi;
4096 pCurTSS32->edi = pCtx->edi;
4097 pCurTSS32->es = pCtx->es.Sel;
4098 pCurTSS32->cs = pCtx->cs.Sel;
4099 pCurTSS32->ss = pCtx->ss.Sel;
4100 pCurTSS32->ds = pCtx->ds.Sel;
4101 pCurTSS32->fs = pCtx->fs.Sel;
4102 pCurTSS32->gs = pCtx->gs.Sel;
4103
4104 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4105 if (rcStrict != VINF_SUCCESS)
4106 {
4107 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4108 VBOXSTRICTRC_VAL(rcStrict)));
4109 return rcStrict;
4110 }
4111 }
4112 else
4113 {
4114 /*
4115 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4116 */
4117 void *pvCurTSS16;
4118 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4119 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4120 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4121 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4122 if (rcStrict != VINF_SUCCESS)
4123 {
4124 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4125 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4126 return rcStrict;
4127 }
4128
4129 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4130 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4131 pCurTSS16->ip = uNextEip;
4132 pCurTSS16->flags = u32EFlags;
4133 pCurTSS16->ax = pCtx->ax;
4134 pCurTSS16->cx = pCtx->cx;
4135 pCurTSS16->dx = pCtx->dx;
4136 pCurTSS16->bx = pCtx->bx;
4137 pCurTSS16->sp = pCtx->sp;
4138 pCurTSS16->bp = pCtx->bp;
4139 pCurTSS16->si = pCtx->si;
4140 pCurTSS16->di = pCtx->di;
4141 pCurTSS16->es = pCtx->es.Sel;
4142 pCurTSS16->cs = pCtx->cs.Sel;
4143 pCurTSS16->ss = pCtx->ss.Sel;
4144 pCurTSS16->ds = pCtx->ds.Sel;
4145
4146 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4147 if (rcStrict != VINF_SUCCESS)
4148 {
4149 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4150 VBOXSTRICTRC_VAL(rcStrict)));
4151 return rcStrict;
4152 }
4153 }
4154
4155 /*
4156 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4157 */
4158 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4159 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4160 {
4161 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4162 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4163 pNewTSS->selPrev = pCtx->tr.Sel;
4164 }
4165
4166 /*
4167 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4168 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4169 */
4170 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4171 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4172 bool fNewDebugTrap;
4173 if (fIsNewTSS386)
4174 {
4175 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4176 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4177 uNewEip = pNewTSS32->eip;
4178 uNewEflags = pNewTSS32->eflags;
4179 uNewEax = pNewTSS32->eax;
4180 uNewEcx = pNewTSS32->ecx;
4181 uNewEdx = pNewTSS32->edx;
4182 uNewEbx = pNewTSS32->ebx;
4183 uNewEsp = pNewTSS32->esp;
4184 uNewEbp = pNewTSS32->ebp;
4185 uNewEsi = pNewTSS32->esi;
4186 uNewEdi = pNewTSS32->edi;
4187 uNewES = pNewTSS32->es;
4188 uNewCS = pNewTSS32->cs;
4189 uNewSS = pNewTSS32->ss;
4190 uNewDS = pNewTSS32->ds;
4191 uNewFS = pNewTSS32->fs;
4192 uNewGS = pNewTSS32->gs;
4193 uNewLdt = pNewTSS32->selLdt;
4194 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4195 }
4196 else
4197 {
4198 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4199 uNewCr3 = 0;
4200 uNewEip = pNewTSS16->ip;
4201 uNewEflags = pNewTSS16->flags;
4202 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4203 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4204 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4205 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4206 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4207 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4208 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4209 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4210 uNewES = pNewTSS16->es;
4211 uNewCS = pNewTSS16->cs;
4212 uNewSS = pNewTSS16->ss;
4213 uNewDS = pNewTSS16->ds;
4214 uNewFS = 0;
4215 uNewGS = 0;
4216 uNewLdt = pNewTSS16->selLdt;
4217 fNewDebugTrap = false;
4218 }
4219
4220 if (GCPtrNewTSS == GCPtrCurTSS)
4221 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4222 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4223
4224 /*
4225 * We're done accessing the new TSS.
4226 */
4227 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4228 if (rcStrict != VINF_SUCCESS)
4229 {
4230 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4231 return rcStrict;
4232 }
4233
4234 /*
4235 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4236 */
4237 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4238 {
4239 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4240 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4241 if (rcStrict != VINF_SUCCESS)
4242 {
4243 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4244 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4245 return rcStrict;
4246 }
4247
4248 /* Check that the descriptor indicates the new TSS is available (not busy). */
4249 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4250 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4251 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4252
4253 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4254 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4255 if (rcStrict != VINF_SUCCESS)
4256 {
4257 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4258 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4259 return rcStrict;
4260 }
4261 }
4262
4263 /*
4264 * From this point on we're technically in the new task. Exceptions raised from here on are
4265 * delivered after the task switch completes but before any instruction in the new task executes.
4266 */
4267 pCtx->tr.Sel = SelTSS;
4268 pCtx->tr.ValidSel = SelTSS;
4269 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4270 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4271 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4272 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4273 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4274
4275 /* Set the busy bit in TR. */
4276 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4277 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4278 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4279 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4280 {
4281 uNewEflags |= X86_EFL_NT;
4282 }
4283
4284 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
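    /* CR0.TS is set on every hardware task switch so the first FPU/SSE instruction in
     * the new task raises #NM, allowing lazy FPU state switching. */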
4285 pCtx->cr0 |= X86_CR0_TS;
4286 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4287
4288 pCtx->eip = uNewEip;
4289 pCtx->eax = uNewEax;
4290 pCtx->ecx = uNewEcx;
4291 pCtx->edx = uNewEdx;
4292 pCtx->ebx = uNewEbx;
4293 pCtx->esp = uNewEsp;
4294 pCtx->ebp = uNewEbp;
4295 pCtx->esi = uNewEsi;
4296 pCtx->edi = uNewEdi;
4297
4298 uNewEflags &= X86_EFL_LIVE_MASK;
4299 uNewEflags |= X86_EFL_RA1_MASK;
4300 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4301
4302 /*
4303 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4304 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4305 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4306 */
4307 pCtx->es.Sel = uNewES;
4308 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4309
4310 pCtx->cs.Sel = uNewCS;
4311 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4312
4313 pCtx->ss.Sel = uNewSS;
4314 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4315
4316 pCtx->ds.Sel = uNewDS;
4317 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4318
4319 pCtx->fs.Sel = uNewFS;
4320 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4321
4322 pCtx->gs.Sel = uNewGS;
4323 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4324 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4325
4326 pCtx->ldtr.Sel = uNewLdt;
4327 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4328 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4329 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4330
4331 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4332 {
4333 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4334 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4335 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4336 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4337 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4338 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4339 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4340 }
4341
4342 /*
4343 * Switch CR3 for the new task.
4344 */
4345 if ( fIsNewTSS386
4346 && (pCtx->cr0 & X86_CR0_PG))
4347 {
4348 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4349 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4350 {
4351 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4352 AssertRCSuccessReturn(rc, rc);
4353 }
4354 else
4355 pCtx->cr3 = uNewCr3;
4356
4357 /* Inform PGM. */
4358 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4359 {
4360 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4361 AssertRCReturn(rc, rc);
4362 /* ignore informational status codes */
4363 }
4364 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4365 }
4366
4367 /*
4368 * Switch LDTR for the new task.
4369 */
4370 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4371 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4372 else
4373 {
4374 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4375
4376 IEMSELDESC DescNewLdt;
4377 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4378 if (rcStrict != VINF_SUCCESS)
4379 {
4380 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4381 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4382 return rcStrict;
4383 }
4384 if ( !DescNewLdt.Legacy.Gen.u1Present
4385 || DescNewLdt.Legacy.Gen.u1DescType
4386 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4387 {
4388 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4389 uNewLdt, DescNewLdt.Legacy.u));
4390 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4391 }
4392
4393 pCtx->ldtr.ValidSel = uNewLdt;
4394 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4395 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4396 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4397 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4398 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4399 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4400 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4401 }
4402
4403 IEMSELDESC DescSS;
4404 if (IEM_IS_V86_MODE(pVCpu))
4405 {
4406 pVCpu->iem.s.uCpl = 3;
4407 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4408 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4409 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4410 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4411 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4412 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4413
4414 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4415 DescSS.Legacy.u = 0;
4416 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4417 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4418 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4419 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4420 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4421 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4422 DescSS.Legacy.Gen.u2Dpl = 3;
4423 }
4424 else
4425 {
4426 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4427
4428 /*
4429 * Load the stack segment for the new task.
4430 */
4431 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4432 {
4433 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4434 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4435 }
4436
4437 /* Fetch the descriptor. */
4438 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4439 if (rcStrict != VINF_SUCCESS)
4440 {
4441 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4442 VBOXSTRICTRC_VAL(rcStrict)));
4443 return rcStrict;
4444 }
4445
4446 /* SS must be a data segment and writable. */
4447 if ( !DescSS.Legacy.Gen.u1DescType
4448 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4449 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4450 {
4451 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4452 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4453 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4454 }
4455
4456 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4457 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4458 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4459 {
4460 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4461 uNewCpl));
4462 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4463 }
4464
4465 /* Is it there? */
4466 if (!DescSS.Legacy.Gen.u1Present)
4467 {
4468 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4469 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4470 }
4471
4472 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4473 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4474
4475 /* Set the accessed bit before committing the result into SS. */
4476 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4477 {
4478 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4479 if (rcStrict != VINF_SUCCESS)
4480 return rcStrict;
4481 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4482 }
4483
4484 /* Commit SS. */
4485 pCtx->ss.Sel = uNewSS;
4486 pCtx->ss.ValidSel = uNewSS;
4487 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4488 pCtx->ss.u32Limit = cbLimit;
4489 pCtx->ss.u64Base = u64Base;
4490 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4491 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4492
4493 /* CPL has changed, update IEM before loading rest of segments. */
4494 pVCpu->iem.s.uCpl = uNewCpl;
4495
4496 /*
4497 * Load the data segments for the new task.
4498 */
4499 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4500 if (rcStrict != VINF_SUCCESS)
4501 return rcStrict;
4502 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4503 if (rcStrict != VINF_SUCCESS)
4504 return rcStrict;
4505 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4506 if (rcStrict != VINF_SUCCESS)
4507 return rcStrict;
4508 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4509 if (rcStrict != VINF_SUCCESS)
4510 return rcStrict;
4511
4512 /*
4513 * Load the code segment for the new task.
4514 */
4515 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4516 {
4517 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4518 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4519 }
4520
4521 /* Fetch the descriptor. */
4522 IEMSELDESC DescCS;
4523 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4524 if (rcStrict != VINF_SUCCESS)
4525 {
4526 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4527 return rcStrict;
4528 }
4529
4530 /* CS must be a code segment. */
4531 if ( !DescCS.Legacy.Gen.u1DescType
4532 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4533 {
4534 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4535 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4536 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4537 }
4538
4539 /* For conforming CS, DPL must be less than or equal to the RPL. */
4540 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4541 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4542 {
4543 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4544 DescCS.Legacy.Gen.u2Dpl));
4545 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4546 }
4547
4548 /* For non-conforming CS, DPL must match RPL. */
4549 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4550 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4551 {
4552 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4553 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4554 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4555 }
4556
4557 /* Is it there? */
4558 if (!DescCS.Legacy.Gen.u1Present)
4559 {
4560 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4561 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4562 }
4563
4564 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4565 u64Base = X86DESC_BASE(&DescCS.Legacy);
4566
4567 /* Set the accessed bit before committing the result into CS. */
4568 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4569 {
4570 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4571 if (rcStrict != VINF_SUCCESS)
4572 return rcStrict;
4573 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4574 }
4575
4576 /* Commit CS. */
4577 pCtx->cs.Sel = uNewCS;
4578 pCtx->cs.ValidSel = uNewCS;
4579 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4580 pCtx->cs.u32Limit = cbLimit;
4581 pCtx->cs.u64Base = u64Base;
4582 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4583 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4584 }
4585
4586 /** @todo Debug trap. */
4587 if (fIsNewTSS386 && fNewDebugTrap)
4588 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4589
4590 /*
4591 * Construct the error code masks based on what caused this task switch.
4592 * See Intel Instruction reference for INT.
4593 */
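    /* uExt becomes the EXT bit (bit 0) of any error code raised while completing the
     * switch: set when the switch was triggered by an external event / CPU exception,
     * clear for software initiated INT n task switches. */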
4594 uint16_t uExt;
4595 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4596 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4597 {
4598 uExt = 1;
4599 }
4600 else
4601 uExt = 0;
4602
4603 /*
4604 * Push any error code on to the new stack.
4605 */
4606 if (fFlags & IEM_XCPT_FLAGS_ERR)
4607 {
4608 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4609 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4610 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4611
4612 /* Check that there is sufficient space on the stack. */
4613 /** @todo Factor out segment limit checking for normal/expand down segments
4614 * into a separate function. */
4615 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4616 {
4617 if ( pCtx->esp - 1 > cbLimitSS
4618 || pCtx->esp < cbStackFrame)
4619 {
4620 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4621 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4622 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4623 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4624 }
4625 }
4626 else
4627 {
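            /* Expand-down stack segment: valid offsets lie above the limit, up to 0xffff
             * or 0xffffffff depending on the D/B bit, hence the inverted bounds check
             * compared to the normal (expand-up) case above. */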
4628 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4629 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4630 {
4631 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4632 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4633 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4634 }
4635 }
4636
4637
4638 if (fIsNewTSS386)
4639 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4640 else
4641 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4642 if (rcStrict != VINF_SUCCESS)
4643 {
4644 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4645 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4646 return rcStrict;
4647 }
4648 }
4649
4650 /* Check the new EIP against the new CS limit. */
4651 if (pCtx->eip > pCtx->cs.u32Limit)
4652 {
4653 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4654 pCtx->eip, pCtx->cs.u32Limit));
4655 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4656 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4657 }
4658
4659 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4660 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4661}
4662
4663
4664/**
4665 * Implements exceptions and interrupts for protected mode.
4666 *
4667 * @returns VBox strict status code.
4668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4669 * @param pCtx The CPU context.
4670 * @param cbInstr The number of bytes to offset rIP by in the return
4671 * address.
4672 * @param u8Vector The interrupt / exception vector number.
4673 * @param fFlags The flags.
4674 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4675 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4676 */
4677IEM_STATIC VBOXSTRICTRC
4678iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4679 PCPUMCTX pCtx,
4680 uint8_t cbInstr,
4681 uint8_t u8Vector,
4682 uint32_t fFlags,
4683 uint16_t uErr,
4684 uint64_t uCr2)
4685{
4686 /*
4687 * Read the IDT entry.
4688 */
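 /* (Protected-mode IDT gate descriptors are 8 bytes each, hence the '8 *' scaling below.) */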
4689 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4690 {
4691 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4692 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4693 }
4694 X86DESC Idte;
4695 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4696 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4697 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4698 {
4699 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4700 return rcStrict;
4701 }
4702 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4703 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4704 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4705
4706 /*
4707 * Check the descriptor type, DPL and such.
4708 * ASSUMES this is done in the same order as described for call-gate calls.
4709 */
4710 if (Idte.Gate.u1DescType)
4711 {
4712 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4713 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4714 }
4715 bool fTaskGate = false;
4716 uint8_t f32BitGate = true;
4717 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4718 switch (Idte.Gate.u4Type)
4719 {
4720 case X86_SEL_TYPE_SYS_UNDEFINED:
4721 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4722 case X86_SEL_TYPE_SYS_LDT:
4723 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4724 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4725 case X86_SEL_TYPE_SYS_UNDEFINED2:
4726 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4727 case X86_SEL_TYPE_SYS_UNDEFINED3:
4728 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4729 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4730 case X86_SEL_TYPE_SYS_UNDEFINED4:
4731 {
4732 /** @todo check what actually happens when the type is wrong...
4733 * esp. call gates. */
4734 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4735 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4736 }
4737
4738 case X86_SEL_TYPE_SYS_286_INT_GATE:
4739 f32BitGate = false;
4740 RT_FALL_THRU();
4741 case X86_SEL_TYPE_SYS_386_INT_GATE:
4742 fEflToClear |= X86_EFL_IF;
4743 break;
4744
4745 case X86_SEL_TYPE_SYS_TASK_GATE:
4746 fTaskGate = true;
4747#ifndef IEM_IMPLEMENTS_TASKSWITCH
4748 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4749#endif
4750 break;
4751
4752 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4753 f32BitGate = false;
 RT_FALL_THRU();
4754 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4755 break;
4756
4757 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4758 }
4759
4760 /* Check DPL against CPL if applicable. */
4761 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4762 {
4763 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4764 {
4765 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4766 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4767 }
4768 }
4769
4770 /* Is it there? */
4771 if (!Idte.Gate.u1Present)
4772 {
4773 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4774 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4775 }
4776
4777 /* Is it a task-gate? */
4778 if (fTaskGate)
4779 {
4780 /*
4781 * Construct the error code masks based on what caused this task switch.
4782 * See Intel Instruction reference for INT.
4783 */
4784 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4785 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4786 RTSEL SelTSS = Idte.Gate.u16Sel;
4787
4788 /*
4789 * Fetch the TSS descriptor in the GDT.
4790 */
4791 IEMSELDESC DescTSS;
4792 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4793 if (rcStrict != VINF_SUCCESS)
4794 {
4795 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4796 VBOXSTRICTRC_VAL(rcStrict)));
4797 return rcStrict;
4798 }
4799
4800 /* The TSS descriptor must be a system segment and be available (not busy). */
4801 if ( DescTSS.Legacy.Gen.u1DescType
4802 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4803 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4804 {
4805 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4806 u8Vector, SelTSS, DescTSS.Legacy.au64));
4807 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4808 }
4809
4810 /* The TSS must be present. */
4811 if (!DescTSS.Legacy.Gen.u1Present)
4812 {
4813 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4814 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4815 }
4816
4817 /* Do the actual task switch. */
4818 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4819 }
4820
4821 /* A null CS is bad. */
4822 RTSEL NewCS = Idte.Gate.u16Sel;
4823 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4824 {
4825 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4826 return iemRaiseGeneralProtectionFault0(pVCpu);
4827 }
4828
4829 /* Fetch the descriptor for the new CS. */
4830 IEMSELDESC DescCS;
4831 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4832 if (rcStrict != VINF_SUCCESS)
4833 {
4834 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4835 return rcStrict;
4836 }
4837
4838 /* Must be a code segment. */
4839 if (!DescCS.Legacy.Gen.u1DescType)
4840 {
4841 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4842 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4843 }
4844 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4845 {
4846 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4847 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4848 }
4849
4850 /* Don't allow lowering the privilege level. */
4851 /** @todo Does the lowering of privileges apply to software interrupts
4852 * only? This has bearings on the more-privileged or
4853 * same-privilege stack behavior further down. A testcase would
4854 * be nice. */
4855 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4856 {
4857 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4858 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4859 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4860 }
4861
4862 /* Make sure the selector is present. */
4863 if (!DescCS.Legacy.Gen.u1Present)
4864 {
4865 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4866 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4867 }
4868
4869 /* Check the new EIP against the new CS limit. */
4870 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4871 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4872 ? Idte.Gate.u16OffsetLow
4873 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4874 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4875 if (uNewEip > cbLimitCS)
4876 {
4877 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4878 u8Vector, uNewEip, cbLimitCS, NewCS));
4879 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4880 }
4881 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4882
4883 /* Calc the flag image to push. */
4884 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4885 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4886 fEfl &= ~X86_EFL_RF;
4887 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4888 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4889
4890 /* From V8086 mode only go to CPL 0. */
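 /* Conforming code segments keep the current CPL; non-conforming ones run at CS.DPL. */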
4891 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4892 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4893 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4894 {
4895 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4896 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4897 }
4898
4899 /*
4900 * If the privilege level changes, we need to get a new stack from the TSS.
4901 * This in turns means validating the new SS and ESP...
4902 */
4903 if (uNewCpl != pVCpu->iem.s.uCpl)
4904 {
4905 RTSEL NewSS;
4906 uint32_t uNewEsp;
4907 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4908 if (rcStrict != VINF_SUCCESS)
4909 return rcStrict;
4910
4911 IEMSELDESC DescSS;
4912 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4913 if (rcStrict != VINF_SUCCESS)
4914 return rcStrict;
4915 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4916 if (!DescSS.Legacy.Gen.u1DefBig)
4917 {
4918 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4919 uNewEsp = (uint16_t)uNewEsp;
4920 }
4921
4922 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4923
4924 /* Check that there is sufficient space for the stack frame. */
4925 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4926 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4927 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4928 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
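 /* Note: the frame is EIP, CS, EFLAGS, ESP and SS (plus an optional error code, and plus
    ES, DS, FS and GS when interrupting V8086 code).  Each entry is two bytes through a
    16-bit gate; the shift by f32BitGate doubles that for 32-bit gates. */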
4929
4930 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4931 {
4932 if ( uNewEsp - 1 > cbLimitSS
4933 || uNewEsp < cbStackFrame)
4934 {
4935 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4936 u8Vector, NewSS, uNewEsp, cbStackFrame));
4937 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4938 }
4939 }
4940 else
4941 {
4942 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4943 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4944 {
4945 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4946 u8Vector, NewSS, uNewEsp, cbStackFrame));
4947 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4948 }
4949 }
4950
4951 /*
4952 * Start making changes.
4953 */
4954
4955 /* Set the new CPL so that stack accesses use it. */
4956 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4957 pVCpu->iem.s.uCpl = uNewCpl;
4958
4959 /* Create the stack frame. */
4960 RTPTRUNION uStackFrame;
4961 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4962 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4963 if (rcStrict != VINF_SUCCESS)
4964 return rcStrict;
4965 void * const pvStackFrame = uStackFrame.pv;
4966 if (f32BitGate)
4967 {
4968 if (fFlags & IEM_XCPT_FLAGS_ERR)
4969 *uStackFrame.pu32++ = uErr;
4970 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4971 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4972 uStackFrame.pu32[2] = fEfl;
4973 uStackFrame.pu32[3] = pCtx->esp;
4974 uStackFrame.pu32[4] = pCtx->ss.Sel;
4975 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4976 if (fEfl & X86_EFL_VM)
4977 {
4978 uStackFrame.pu32[1] = pCtx->cs.Sel;
4979 uStackFrame.pu32[5] = pCtx->es.Sel;
4980 uStackFrame.pu32[6] = pCtx->ds.Sel;
4981 uStackFrame.pu32[7] = pCtx->fs.Sel;
4982 uStackFrame.pu32[8] = pCtx->gs.Sel;
4983 }
4984 }
4985 else
4986 {
4987 if (fFlags & IEM_XCPT_FLAGS_ERR)
4988 *uStackFrame.pu16++ = uErr;
4989 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4990 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4991 uStackFrame.pu16[2] = fEfl;
4992 uStackFrame.pu16[3] = pCtx->sp;
4993 uStackFrame.pu16[4] = pCtx->ss.Sel;
4994 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4995 if (fEfl & X86_EFL_VM)
4996 {
4997 uStackFrame.pu16[1] = pCtx->cs.Sel;
4998 uStackFrame.pu16[5] = pCtx->es.Sel;
4999 uStackFrame.pu16[6] = pCtx->ds.Sel;
5000 uStackFrame.pu16[7] = pCtx->fs.Sel;
5001 uStackFrame.pu16[8] = pCtx->gs.Sel;
5002 }
5003 }
5004 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5005 if (rcStrict != VINF_SUCCESS)
5006 return rcStrict;
5007
5008 /* Mark the selectors 'accessed' (hope this is the correct time). */
5009 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5010 * after pushing the stack frame? (Write protect the gdt + stack to
5011 * find out.) */
5012 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5013 {
5014 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5015 if (rcStrict != VINF_SUCCESS)
5016 return rcStrict;
5017 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5018 }
5019
5020 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5021 {
5022 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5023 if (rcStrict != VINF_SUCCESS)
5024 return rcStrict;
5025 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5026 }
5027
5028 /*
5029 * Start committing the register changes (joins with the DPL=CPL branch).
5030 */
5031 pCtx->ss.Sel = NewSS;
5032 pCtx->ss.ValidSel = NewSS;
5033 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5034 pCtx->ss.u32Limit = cbLimitSS;
5035 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5036 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5037 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5038 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5039 * SP is loaded).
5040 * Need to check the other combinations too:
5041 * - 16-bit TSS, 32-bit handler
5042 * - 32-bit TSS, 16-bit handler */
5043 if (!pCtx->ss.Attr.n.u1DefBig)
5044 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
5045 else
5046 pCtx->rsp = uNewEsp - cbStackFrame;
5047
5048 if (fEfl & X86_EFL_VM)
5049 {
5050 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
5051 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5052 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5053 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5054 }
5055 }
5056 /*
5057 * Same privilege, no stack change and smaller stack frame.
5058 */
5059 else
5060 {
5061 uint64_t uNewRsp;
5062 RTPTRUNION uStackFrame;
5063 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
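 /* Without a stack switch only EIP, CS and EFLAGS (plus an optional error code) are pushed:
    6 or 8 bytes through a 16-bit gate, doubled for a 32-bit gate. */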
5064 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5065 if (rcStrict != VINF_SUCCESS)
5066 return rcStrict;
5067 void * const pvStackFrame = uStackFrame.pv;
5068
5069 if (f32BitGate)
5070 {
5071 if (fFlags & IEM_XCPT_FLAGS_ERR)
5072 *uStackFrame.pu32++ = uErr;
5073 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5074 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5075 uStackFrame.pu32[2] = fEfl;
5076 }
5077 else
5078 {
5079 if (fFlags & IEM_XCPT_FLAGS_ERR)
5080 *uStackFrame.pu16++ = uErr;
5081 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5082 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5083 uStackFrame.pu16[2] = fEfl;
5084 }
5085 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5086 if (rcStrict != VINF_SUCCESS)
5087 return rcStrict;
5088
5089 /* Mark the CS selector as 'accessed'. */
5090 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5091 {
5092 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5093 if (rcStrict != VINF_SUCCESS)
5094 return rcStrict;
5095 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5096 }
5097
5098 /*
5099 * Start committing the register changes (joins with the other branch).
5100 */
5101 pCtx->rsp = uNewRsp;
5102 }
5103
5104 /* ... register committing continues. */
5105 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5106 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5107 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5108 pCtx->cs.u32Limit = cbLimitCS;
5109 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5110 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5111
5112 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5113 fEfl &= ~fEflToClear;
5114 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5115
5116 if (fFlags & IEM_XCPT_FLAGS_CR2)
5117 pCtx->cr2 = uCr2;
5118
5119 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5120 iemRaiseXcptAdjustState(pCtx, u8Vector);
5121
5122 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5123}
5124
5125
5126/**
5127 * Implements exceptions and interrupts for long mode.
5128 *
5129 * @returns VBox strict status code.
5130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5131 * @param pCtx The CPU context.
5132 * @param cbInstr The number of bytes to offset rIP by in the return
5133 * address.
5134 * @param u8Vector The interrupt / exception vector number.
5135 * @param fFlags The flags.
5136 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5137 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5138 */
5139IEM_STATIC VBOXSTRICTRC
5140iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5141 PCPUMCTX pCtx,
5142 uint8_t cbInstr,
5143 uint8_t u8Vector,
5144 uint32_t fFlags,
5145 uint16_t uErr,
5146 uint64_t uCr2)
5147{
5148 /*
5149 * Read the IDT entry.
5150 */
5151 uint16_t offIdt = (uint16_t)u8Vector << 4;
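 /* (Long-mode IDT gate descriptors are 16 bytes each, hence the shift by 4.) */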
5152 if (pCtx->idtr.cbIdt < offIdt + 7)
5153 {
5154 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5155 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5156 }
5157 X86DESC64 Idte;
5158 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5159 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5160 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5161 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5162 {
5163 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5164 return rcStrict;
5165 }
5166 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5167 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5168 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5169
5170 /*
5171 * Check the descriptor type, DPL and such.
5172 * ASSUMES this is done in the same order as described for call-gate calls.
5173 */
5174 if (Idte.Gate.u1DescType)
5175 {
5176 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5177 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5178 }
5179 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5180 switch (Idte.Gate.u4Type)
5181 {
5182 case AMD64_SEL_TYPE_SYS_INT_GATE:
5183 fEflToClear |= X86_EFL_IF;
5184 break;
5185 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5186 break;
5187
5188 default:
5189 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5190 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5191 }
5192
5193 /* Check DPL against CPL if applicable. */
5194 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5195 {
5196 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5197 {
5198 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5199 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5200 }
5201 }
5202
5203 /* Is it there? */
5204 if (!Idte.Gate.u1Present)
5205 {
5206 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5207 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5208 }
5209
5210 /* A null CS is bad. */
5211 RTSEL NewCS = Idte.Gate.u16Sel;
5212 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5213 {
5214 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5215 return iemRaiseGeneralProtectionFault0(pVCpu);
5216 }
5217
5218 /* Fetch the descriptor for the new CS. */
5219 IEMSELDESC DescCS;
5220 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5221 if (rcStrict != VINF_SUCCESS)
5222 {
5223 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5224 return rcStrict;
5225 }
5226
5227 /* Must be a 64-bit code segment. */
5228 if (!DescCS.Long.Gen.u1DescType)
5229 {
5230 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5231 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5232 }
5233 if ( !DescCS.Long.Gen.u1Long
5234 || DescCS.Long.Gen.u1DefBig
5235 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5236 {
5237 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5238 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5239 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5240 }
5241
5242 /* Don't allow lowering the privilege level. For non-conforming CS
5243 selectors, the CS.DPL sets the privilege level the trap/interrupt
5244 handler runs at. For conforming CS selectors, the CPL remains
5245 unchanged, but the CS.DPL must be <= CPL. */
5246 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5247 * when CPU in Ring-0. Result \#GP? */
5248 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5249 {
5250 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5251 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5252 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5253 }
5254
5255
5256 /* Make sure the selector is present. */
5257 if (!DescCS.Legacy.Gen.u1Present)
5258 {
5259 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5260 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5261 }
5262
5263 /* Check that the new RIP is canonical. */
5264 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5265 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5266 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5267 if (!IEM_IS_CANONICAL(uNewRip))
5268 {
5269 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5270 return iemRaiseGeneralProtectionFault0(pVCpu);
5271 }
5272
5273 /*
5274 * If the privilege level changes or if the IST isn't zero, we need to get
5275 * a new stack from the TSS.
5276 */
5277 uint64_t uNewRsp;
5278 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5279 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5280 if ( uNewCpl != pVCpu->iem.s.uCpl
5281 || Idte.Gate.u3IST != 0)
5282 {
5283 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5284 if (rcStrict != VINF_SUCCESS)
5285 return rcStrict;
5286 }
5287 else
5288 uNewRsp = pCtx->rsp;
5289 uNewRsp &= ~(uint64_t)0xf;
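 /* In 64-bit mode the CPU aligns RSP down to a 16-byte boundary before pushing the
    interrupt frame, whether or not a stack switch took place (Intel SDM, "64-Bit Mode
    Stack Frame"). */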
5290
5291 /*
5292 * Calc the flag image to push.
5293 */
5294 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5295 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5296 fEfl &= ~X86_EFL_RF;
5297 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5298 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5299
5300 /*
5301 * Start making changes.
5302 */
5303 /* Set the new CPL so that stack accesses use it. */
5304 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5305 pVCpu->iem.s.uCpl = uNewCpl;
5306
5307 /* Create the stack frame. */
5308 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
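 /* The 64-bit frame always contains RIP, CS, RFLAGS, RSP and SS (5 qwords) plus an optional
    error code; unlike protected mode, SS:RSP is pushed even when the CPL does not change. */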
5309 RTPTRUNION uStackFrame;
5310 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5311 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5312 if (rcStrict != VINF_SUCCESS)
5313 return rcStrict;
5314 void * const pvStackFrame = uStackFrame.pv;
5315
5316 if (fFlags & IEM_XCPT_FLAGS_ERR)
5317 *uStackFrame.pu64++ = uErr;
5318 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5319 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5320 uStackFrame.pu64[2] = fEfl;
5321 uStackFrame.pu64[3] = pCtx->rsp;
5322 uStackFrame.pu64[4] = pCtx->ss.Sel;
5323 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5324 if (rcStrict != VINF_SUCCESS)
5325 return rcStrict;
5326
5327 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5328 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5329 * after pushing the stack frame? (Write protect the gdt + stack to
5330 * find out.) */
5331 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5332 {
5333 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5334 if (rcStrict != VINF_SUCCESS)
5335 return rcStrict;
5336 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5337 }
5338
5339 /*
5340 * Start committing the register changes.
5341 */
5342 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5343 * hidden registers when interrupting 32-bit or 16-bit code! */
5344 if (uNewCpl != uOldCpl)
5345 {
5346 pCtx->ss.Sel = 0 | uNewCpl;
5347 pCtx->ss.ValidSel = 0 | uNewCpl;
5348 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5349 pCtx->ss.u32Limit = UINT32_MAX;
5350 pCtx->ss.u64Base = 0;
5351 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5352 }
5353 pCtx->rsp = uNewRsp - cbStackFrame;
5354 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5355 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5356 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5357 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5358 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5359 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5360 pCtx->rip = uNewRip;
5361
5362 fEfl &= ~fEflToClear;
5363 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5364
5365 if (fFlags & IEM_XCPT_FLAGS_CR2)
5366 pCtx->cr2 = uCr2;
5367
5368 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5369 iemRaiseXcptAdjustState(pCtx, u8Vector);
5370
5371 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5372}
5373
5374
5375/**
5376 * Implements exceptions and interrupts.
5377 *
5378 * All exceptions and interrupts go through this function!
5379 *
5380 * @returns VBox strict status code.
5381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5382 * @param cbInstr The number of bytes to offset rIP by in the return
5383 * address.
5384 * @param u8Vector The interrupt / exception vector number.
5385 * @param fFlags The flags.
5386 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5387 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5388 */
5389DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5390iemRaiseXcptOrInt(PVMCPU pVCpu,
5391 uint8_t cbInstr,
5392 uint8_t u8Vector,
5393 uint32_t fFlags,
5394 uint16_t uErr,
5395 uint64_t uCr2)
5396{
5397 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5398#ifdef IN_RING0
5399 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5400 AssertRCReturn(rc, rc);
5401#endif
5402
5403#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5404 /*
5405 * Flush prefetch buffer
5406 */
5407 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5408#endif
5409
5410 /*
5411 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5412 */
5413 if ( pCtx->eflags.Bits.u1VM
5414 && pCtx->eflags.Bits.u2IOPL != 3
5415 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5416 && (pCtx->cr0 & X86_CR0_PE) )
5417 {
5418 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5419 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5420 u8Vector = X86_XCPT_GP;
5421 uErr = 0;
5422 }
5423#ifdef DBGFTRACE_ENABLED
5424 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5425 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5426 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5427#endif
5428
5429#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5430 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
5431 {
5432 /*
5433 * If the event is being injected as part of VMRUN, it isn't subject to event
5434 * intercepts in the nested-guest. However, secondary exceptions that occur
5435 * during injection of any event -are- subject to exception intercepts.
5436 * See AMD spec. 15.20 "Event Injection".
5437 */
5438 if (!pCtx->hwvirt.svm.fInterceptEvents)
5439 pCtx->hwvirt.svm.fInterceptEvents = 1;
5440 else
5441 {
5442 /*
5443 * Check and handle if the event being raised is intercepted.
5444 */
5445 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5446 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5447 return rcStrict0;
5448 }
5449 }
5450#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5451
5452 /*
5453 * Do recursion accounting.
5454 */
5455 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5456 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5457 if (pVCpu->iem.s.cXcptRecursions == 0)
5458 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5459 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5460 else
5461 {
5462 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5463 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5464 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5465
5466 if (pVCpu->iem.s.cXcptRecursions >= 3)
5467 {
5468#ifdef DEBUG_bird
5469 AssertFailed();
5470#endif
5471 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5472 }
5473
5474 /*
5475 * Evaluate the sequence of recurring events.
5476 */
5477 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5478 NULL /* pXcptRaiseInfo */);
5479 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5480 { /* likely */ }
5481 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5482 {
5483 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5484 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5485 u8Vector = X86_XCPT_DF;
5486 uErr = 0;
5487 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5488 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5489 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5490 }
5491 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5492 {
5493 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5494 return iemInitiateCpuShutdown(pVCpu);
5495 }
5496 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5497 {
5498 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5499 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5500 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5501 return VERR_EM_GUEST_CPU_HANG;
5502 }
5503 else
5504 {
5505 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5506 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5507 return VERR_IEM_IPE_9;
5508 }
5509
5510 /*
5511 * The 'EXT' bit is set when an exception occurs during delivery of an external
5512 * event (such as an interrupt or an earlier exception)[1]. A privileged software
5513 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by
5514 * software interrupts (INT n), INTO and INT3, the 'EXT' bit will not be set[3].
5515 *
5516 * [1] - Intel spec. 6.13 "Error Code"
5517 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5518 * [3] - Intel Instruction reference for INT n.
5519 */
5520 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5521 && (fFlags & IEM_XCPT_FLAGS_ERR)
5522 && u8Vector != X86_XCPT_PF
5523 && u8Vector != X86_XCPT_DF)
5524 {
5525 uErr |= X86_TRAP_ERR_EXTERNAL;
5526 }
5527 }
5528
5529 pVCpu->iem.s.cXcptRecursions++;
5530 pVCpu->iem.s.uCurXcpt = u8Vector;
5531 pVCpu->iem.s.fCurXcpt = fFlags;
5532 pVCpu->iem.s.uCurXcptErr = uErr;
5533 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5534
5535 /*
5536 * Extensive logging.
5537 */
5538#if defined(LOG_ENABLED) && defined(IN_RING3)
5539 if (LogIs3Enabled())
5540 {
5541 PVM pVM = pVCpu->CTX_SUFF(pVM);
5542 char szRegs[4096];
5543 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5544 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5545 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5546 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5547 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5548 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5549 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5550 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5551 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5552 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5553 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5554 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5555 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5556 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5557 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5558 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5559 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5560 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5561 " efer=%016VR{efer}\n"
5562 " pat=%016VR{pat}\n"
5563 " sf_mask=%016VR{sf_mask}\n"
5564 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5565 " lstar=%016VR{lstar}\n"
5566 " star=%016VR{star} cstar=%016VR{cstar}\n"
5567 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5568 );
5569
5570 char szInstr[256];
5571 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5572 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5573 szInstr, sizeof(szInstr), NULL);
5574 Log3(("%s%s\n", szRegs, szInstr));
5575 }
5576#endif /* LOG_ENABLED */
5577
5578 /*
5579 * Call the mode specific worker function.
5580 */
5581 VBOXSTRICTRC rcStrict;
5582 if (!(pCtx->cr0 & X86_CR0_PE))
5583 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5584 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5585 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5586 else
5587 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5588
5589 /* Flush the prefetch buffer. */
5590#ifdef IEM_WITH_CODE_TLB
5591 pVCpu->iem.s.pbInstrBuf = NULL;
5592#else
5593 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5594#endif
5595
5596 /*
5597 * Unwind.
5598 */
5599 pVCpu->iem.s.cXcptRecursions--;
5600 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5601 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5602 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5603 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl,
5604 pVCpu->iem.s.cXcptRecursions + 1));
5605 return rcStrict;
5606}
5607
5608#ifdef IEM_WITH_SETJMP
5609/**
5610 * See iemRaiseXcptOrInt. Will not return.
5611 */
5612IEM_STATIC DECL_NO_RETURN(void)
5613iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5614 uint8_t cbInstr,
5615 uint8_t u8Vector,
5616 uint32_t fFlags,
5617 uint16_t uErr,
5618 uint64_t uCr2)
5619{
5620 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5621 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5622}
5623#endif
5624
5625
5626/** \#DE - 00. */
5627DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5628{
5629 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5630}
5631
5632
5633/** \#DB - 01.
5634 * @note This automatically clears DR7.GD. */
5635DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5636{
5637 /** @todo set/clear RF. */
5638 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5639 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5640}
5641
5642
5643/** \#BR - 05. */
5644DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5645{
5646 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5647}
5648
5649
5650/** \#UD - 06. */
5651DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5652{
5653 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5654}
5655
5656
5657/** \#NM - 07. */
5658DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5659{
5660 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5661}
5662
5663
5664/** \#TS(err) - 0a. */
5665DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5666{
5667 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5668}
5669
5670
5671/** \#TS(tr) - 0a. */
5672DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5673{
5674 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5675 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5676}
5677
5678
5679/** \#TS(0) - 0a. */
5680DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5681{
5682 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5683 0, 0);
5684}
5685
5686
5687/** \#TS(sel) - 0a. */
5688DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5689{
5690 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5691 uSel & X86_SEL_MASK_OFF_RPL, 0);
5692}
5693
5694
5695/** \#NP(err) - 0b. */
5696DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5697{
5698 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5699}
5700
5701
5702/** \#NP(sel) - 0b. */
5703DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5704{
5705 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5706 uSel & ~X86_SEL_RPL, 0);
5707}
5708
5709
5710/** \#SS(seg) - 0c. */
5711DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5712{
5713 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5714 uSel & ~X86_SEL_RPL, 0);
5715}
5716
5717
5718/** \#SS(err) - 0c. */
5719DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5720{
5721 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5722}
5723
5724
5725/** \#GP(n) - 0d. */
5726DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5727{
5728 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5729}
5730
5731
5732/** \#GP(0) - 0d. */
5733DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5734{
5735 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5736}
5737
5738#ifdef IEM_WITH_SETJMP
5739/** \#GP(0) - 0d. */
5740DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5741{
5742 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5743}
5744#endif
5745
5746
5747/** \#GP(sel) - 0d. */
5748DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5749{
5750 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5751 Sel & ~X86_SEL_RPL, 0);
5752}
5753
5754
5755/** \#GP(0) - 0d. */
5756DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5757{
5758 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5759}
5760
5761
5762/** \#GP(sel) - 0d. */
5763DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5764{
5765 NOREF(iSegReg); NOREF(fAccess);
5766 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5767 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5768}
5769
5770#ifdef IEM_WITH_SETJMP
5771/** \#GP(sel) - 0d, longjmp. */
5772DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5773{
5774 NOREF(iSegReg); NOREF(fAccess);
5775 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5776 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5777}
5778#endif
5779
5780/** \#GP(sel) - 0d. */
5781DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5782{
5783 NOREF(Sel);
5784 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5785}
5786
5787#ifdef IEM_WITH_SETJMP
5788/** \#GP(sel) - 0d, longjmp. */
5789DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5790{
5791 NOREF(Sel);
5792 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5793}
5794#endif
5795
5796
5797/** \#GP(sel) - 0d. */
5798DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5799{
5800 NOREF(iSegReg); NOREF(fAccess);
5801 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5802}
5803
5804#ifdef IEM_WITH_SETJMP
5805/** \#GP(sel) - 0d, longjmp. */
5806DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5807 uint32_t fAccess)
5808{
5809 NOREF(iSegReg); NOREF(fAccess);
5810 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5811}
5812#endif
5813
5814
5815/** \#PF(n) - 0e. */
5816DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5817{
5818 uint16_t uErr;
5819 switch (rc)
5820 {
5821 case VERR_PAGE_NOT_PRESENT:
5822 case VERR_PAGE_TABLE_NOT_PRESENT:
5823 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5824 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5825 uErr = 0;
5826 break;
5827
5828 default:
5829 AssertMsgFailed(("%Rrc\n", rc));
5830 RT_FALL_THRU();
5831 case VERR_ACCESS_DENIED:
5832 uErr = X86_TRAP_PF_P;
5833 break;
5834
5835 /** @todo reserved */
5836 }
5837
5838 if (pVCpu->iem.s.uCpl == 3)
5839 uErr |= X86_TRAP_PF_US;
5840
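 /* Report the instruction-fetch (ID) bit only for code accesses and only when no-execute
    is enabled (PAE paging with EFER.NXE set). */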
5841 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5842 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5843 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5844 uErr |= X86_TRAP_PF_ID;
5845
5846#if 0 /* This is so much nonsense, really. Why was it done like that? */
5847 /* Note! RW access callers reporting a WRITE protection fault, will clear
5848 the READ flag before calling. So, read-modify-write accesses (RW)
5849 can safely be reported as READ faults. */
5850 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5851 uErr |= X86_TRAP_PF_RW;
5852#else
5853 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5854 {
5855 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5856 uErr |= X86_TRAP_PF_RW;
5857 }
5858#endif
5859
5860 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5861 uErr, GCPtrWhere);
5862}
5863
5864#ifdef IEM_WITH_SETJMP
5865/** \#PF(n) - 0e, longjmp. */
5866IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5867{
5868 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5869}
5870#endif
5871
5872
5873/** \#MF(0) - 10. */
5874DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5875{
5876 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5877}
5878
5879
5880/** \#AC(0) - 11. */
5881DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5882{
5883 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5884}
5885
5886
5887/**
5888 * Macro for calling iemCImplRaiseDivideError().
5889 *
5890 * This enables us to add/remove arguments and force different levels of
5891 * inlining as we wish.
5892 *
5893 * @return Strict VBox status code.
5894 */
5895#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5896IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5897{
5898 NOREF(cbInstr);
5899 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5900}
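
/* Illustrative only: roughly how a decoder is expected to use the macro above when it must
   raise \#DE (e.g. AAM with a zero immediate).  The handler name and fragment below are
   hypothetical and kept out of the build. */
#if 0
FNIEMOP_DEF_1(iemOp_ExampleThatMayRaiseDE, uint8_t, bImm) /* hypothetical */
{
    if (bImm == 0) /* zero divisor */
        return IEMOP_RAISE_DIVIDE_ERROR(); /* defers to iemCImplRaiseDivideError */
    return VERR_IEM_INSTR_NOT_IMPLEMENTED; /* placeholder for the rest of the decoder */
}
#endif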
5901
5902
5903/**
5904 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5905 *
5906 * This enables us to add/remove arguments and force different levels of
5907 * inlining as we wish.
5908 *
5909 * @return Strict VBox status code.
5910 */
5911#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5912IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5913{
5914 NOREF(cbInstr);
5915 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5916}
5917
5918
5919/**
5920 * Macro for calling iemCImplRaiseInvalidOpcode().
5921 *
5922 * This enables us to add/remove arguments and force different levels of
5923 * inlining as we wish.
5924 *
5925 * @return Strict VBox status code.
5926 */
5927#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5928IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5929{
5930 NOREF(cbInstr);
5931 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5932}
5933
5934
5935/** @} */
5936
5937
5938/*
5939 *
5940 * Helper routines.
5941 * Helper routines.
5942 * Helper routines.
5943 *
5944 */
5945
5946/**
5947 * Recalculates the effective operand size.
5948 *
5949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5950 */
5951IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5952{
5953 switch (pVCpu->iem.s.enmCpuMode)
5954 {
5955 case IEMMODE_16BIT:
5956 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5957 break;
5958 case IEMMODE_32BIT:
5959 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5960 break;
5961 case IEMMODE_64BIT:
5962 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5963 {
5964 case 0:
5965 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5966 break;
5967 case IEM_OP_PRF_SIZE_OP:
5968 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5969 break;
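 /* Note: REX.W takes precedence over the 0x66 operand-size prefix. */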
5970 case IEM_OP_PRF_SIZE_REX_W:
5971 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5972 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5973 break;
5974 }
5975 break;
5976 default:
5977 AssertFailed();
5978 }
5979}
5980
5981
5982/**
5983 * Sets the default operand size to 64-bit and recalculates the effective
5984 * operand size.
5985 *
5986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5987 */
5988IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5989{
5990 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5991 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5992 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5993 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5994 else
5995 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5996}
5997
5998
5999/*
6000 *
6001 * Common opcode decoders.
6002 * Common opcode decoders.
6003 * Common opcode decoders.
6004 *
6005 */
6006//#include <iprt/mem.h>
6007
6008/**
6009 * Used to add extra details about a stub case.
6010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6011 */
6012IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6013{
6014#if defined(LOG_ENABLED) && defined(IN_RING3)
6015 PVM pVM = pVCpu->CTX_SUFF(pVM);
6016 char szRegs[4096];
6017 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6018 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6019 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6020 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6021 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6022 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6023 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6024 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6025 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6026 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6027 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6028 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6029 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6030 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6031 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6032 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6033 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6034 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6035 " efer=%016VR{efer}\n"
6036 " pat=%016VR{pat}\n"
6037 " sf_mask=%016VR{sf_mask}\n"
6038 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6039 " lstar=%016VR{lstar}\n"
6040 " star=%016VR{star} cstar=%016VR{cstar}\n"
6041 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6042 );
6043
6044 char szInstr[256];
6045 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6046 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6047 szInstr, sizeof(szInstr), NULL);
6048
6049 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6050#else
6051 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
6052#endif
6053}
6054
6055/**
6056 * Complains about a stub.
6057 *
6058 * Providing two versions of this macro, one for daily use and one for use when
6059 * working on IEM.
6060 */
6061#if 0
6062# define IEMOP_BITCH_ABOUT_STUB() \
6063 do { \
6064 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6065 iemOpStubMsg2(pVCpu); \
6066 RTAssertPanic(); \
6067 } while (0)
6068#else
6069# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6070#endif
6071
6072/** Stubs an opcode. */
6073#define FNIEMOP_STUB(a_Name) \
6074 FNIEMOP_DEF(a_Name) \
6075 { \
6076 RT_NOREF_PV(pVCpu); \
6077 IEMOP_BITCH_ABOUT_STUB(); \
6078 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6079 } \
6080 typedef int ignore_semicolon
6081
6082/** Stubs an opcode. */
6083#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6084 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6085 { \
6086 RT_NOREF_PV(pVCpu); \
6087 RT_NOREF_PV(a_Name0); \
6088 IEMOP_BITCH_ABOUT_STUB(); \
6089 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6090 } \
6091 typedef int ignore_semicolon
6092
6093/** Stubs an opcode which currently should raise \#UD. */
6094#define FNIEMOP_UD_STUB(a_Name) \
6095 FNIEMOP_DEF(a_Name) \
6096 { \
6097 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6098 return IEMOP_RAISE_INVALID_OPCODE(); \
6099 } \
6100 typedef int ignore_semicolon
6101
6102/** Stubs an opcode which currently should raise \#UD. */
6103#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6104 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6105 { \
6106 RT_NOREF_PV(pVCpu); \
6107 RT_NOREF_PV(a_Name0); \
6108 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6109 return IEMOP_RAISE_INVALID_OPCODE(); \
6110 } \
6111 typedef int ignore_semicolon
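
/* Illustrative only (hypothetical handler name, kept out of the build): opcode tables declare
   not-yet-implemented instructions with the stub macros above so that decoding them logs a
   stub message and returns VERR_IEM_INSTR_NOT_IMPLEMENTED. */
#if 0
FNIEMOP_STUB(iemOp_ExampleNotImplemented);
#endif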
6112
6113
6114
6115/** @name Register Access.
6116 * @{
6117 */
6118
6119/**
6120 * Gets a reference (pointer) to the specified hidden segment register.
6121 *
6122 * @returns Hidden register reference.
6123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6124 * @param iSegReg The segment register.
6125 */
6126IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6127{
6128 Assert(iSegReg < X86_SREG_COUNT);
6129 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6130 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6131
6132#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6133 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6134 { /* likely */ }
6135 else
6136 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6137#else
6138 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6139#endif
6140 return pSReg;
6141}
6142
6143
6144/**
6145 * Ensures that the given hidden segment register is up to date.
6146 *
6147 * @returns Hidden register reference.
6148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6149 * @param pSReg The segment register.
6150 */
6151IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6152{
6153#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6154 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6155 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6156#else
6157 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6158 NOREF(pVCpu);
6159#endif
6160 return pSReg;
6161}
6162
6163
6164/**
6165 * Gets a reference (pointer) to the specified segment register (the selector
6166 * value).
6167 *
6168 * @returns Pointer to the selector variable.
6169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6170 * @param iSegReg The segment register.
6171 */
6172DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6173{
6174 Assert(iSegReg < X86_SREG_COUNT);
6175 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6176 return &pCtx->aSRegs[iSegReg].Sel;
6177}
6178
6179
6180/**
6181 * Fetches the selector value of a segment register.
6182 *
6183 * @returns The selector value.
6184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6185 * @param iSegReg The segment register.
6186 */
6187DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6188{
6189 Assert(iSegReg < X86_SREG_COUNT);
6190 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6191}
6192
6193
6194/**
6195 * Fetches the base address value of a segment register.
6196 *
6197 * @returns The segment base address value.
6198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6199 * @param iSegReg The segment register.
6200 */
6201DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6202{
6203 Assert(iSegReg < X86_SREG_COUNT);
6204 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].u64Base;
6205}
6206
6207
6208/**
6209 * Gets a reference (pointer) to the specified general purpose register.
6210 *
6211 * @returns Register reference.
6212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6213 * @param iReg The general purpose register.
6214 */
6215DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6216{
6217 Assert(iReg < 16);
6218 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6219 return &pCtx->aGRegs[iReg];
6220}
6221
6222
6223/**
6224 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6225 *
6226 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6227 *
6228 * @returns Register reference.
6229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6230 * @param iReg The register.
6231 */
6232DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6233{
6234 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6235 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6236 {
6237 Assert(iReg < 16);
6238 return &pCtx->aGRegs[iReg].u8;
6239 }
6240 /* high 8-bit register. */
6241 Assert(iReg < 8);
6242 return &pCtx->aGRegs[iReg & 3].bHi;
6243}
6244
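/*
 * Illustrative sketch (not part of the original source): how the REX rule in
 * iemGRegRefU8 plays out.  Without any REX prefix, encodings 4-7 select the
 * high byte registers AH/CH/DH/BH of GPRs 0-3; with a REX prefix they select
 * SPL/BPL/SIL/DIL instead.  The helper below is purely hypothetical.
 */
#if 0 /* usage sketch only */
DECLINLINE(void) iemExampleSetByteReg(PVMCPU pVCpu, uint8_t iReg, uint8_t uValue)
{
    uint8_t *pu8Dst = iemGRegRefU8(pVCpu, iReg); /* resolves to .u8 or .bHi as described above */
    *pu8Dst = uValue;
}
#endif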
6245
6246/**
6247 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6248 *
6249 * @returns Register reference.
6250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6251 * @param iReg The register.
6252 */
6253DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6254{
6255 Assert(iReg < 16);
6256 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6257 return &pCtx->aGRegs[iReg].u16;
6258}
6259
6260
6261/**
6262 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6263 *
6264 * @returns Register reference.
6265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6266 * @param iReg The register.
6267 */
6268DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6269{
6270 Assert(iReg < 16);
6271 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6272 return &pCtx->aGRegs[iReg].u32;
6273}
6274
6275
6276/**
6277 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6278 *
6279 * @returns Register reference.
6280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6281 * @param iReg The register.
6282 */
6283DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6284{
6285 Assert(iReg < 16);
6286 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6287 return &pCtx->aGRegs[iReg].u64;
6288}
6289
6290
6291/**
6292 * Gets a reference (pointer) to the specified segment register's base address.
6293 *
6294 * @returns Segment register base address reference.
6295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6296 * @param iSegReg The segment selector.
6297 */
6298DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6299{
6300 Assert(iSegReg < X86_SREG_COUNT);
6301 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6302 return &pCtx->aSRegs[iSegReg].u64Base;
6303}
6304
6305
6306/**
6307 * Fetches the value of an 8-bit general purpose register.
6308 *
6309 * @returns The register value.
6310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6311 * @param iReg The register.
6312 */
6313DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6314{
6315 return *iemGRegRefU8(pVCpu, iReg);
6316}
6317
6318
6319/**
6320 * Fetches the value of a 16-bit general purpose register.
6321 *
6322 * @returns The register value.
6323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6324 * @param iReg The register.
6325 */
6326DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6327{
6328 Assert(iReg < 16);
6329 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6330}
6331
6332
6333/**
6334 * Fetches the value of a 32-bit general purpose register.
6335 *
6336 * @returns The register value.
6337 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6338 * @param iReg The register.
6339 */
6340DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6341{
6342 Assert(iReg < 16);
6343 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6344}
6345
6346
6347/**
6348 * Fetches the value of a 64-bit general purpose register.
6349 *
6350 * @returns The register value.
6351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6352 * @param iReg The register.
6353 */
6354DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6355{
6356 Assert(iReg < 16);
6357 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6358}
6359
6360
6361/**
6362 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6363 *
6364 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6365 * segment limit.
6366 *
 * @returns Strict VBox status code.
6367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6368 * @param offNextInstr The offset of the next instruction.
6369 */
6370IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6371{
6372 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6373 switch (pVCpu->iem.s.enmEffOpSize)
6374 {
6375 case IEMMODE_16BIT:
6376 {
6377 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6378 if ( uNewIp > pCtx->cs.u32Limit
6379 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6380 return iemRaiseGeneralProtectionFault0(pVCpu);
6381 pCtx->rip = uNewIp;
6382 break;
6383 }
6384
6385 case IEMMODE_32BIT:
6386 {
6387 Assert(pCtx->rip <= UINT32_MAX);
6388 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6389
6390 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6391 if (uNewEip > pCtx->cs.u32Limit)
6392 return iemRaiseGeneralProtectionFault0(pVCpu);
6393 pCtx->rip = uNewEip;
6394 break;
6395 }
6396
6397 case IEMMODE_64BIT:
6398 {
6399 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6400
6401 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6402 if (!IEM_IS_CANONICAL(uNewRip))
6403 return iemRaiseGeneralProtectionFault0(pVCpu);
6404 pCtx->rip = uNewRip;
6405 break;
6406 }
6407
6408 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6409 }
6410
6411 pCtx->eflags.Bits.u1RF = 0;
6412
6413#ifndef IEM_WITH_CODE_TLB
6414 /* Flush the prefetch buffer. */
6415 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6416#endif
6417
6418 return VINF_SUCCESS;
6419}
6420
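/*
 * Illustrative sketch (not part of the original source): the usual calling
 * pattern for the relative jump helpers from a hypothetical Jcc rel8 handler
 * tail, once the condition has been evaluated and the displacement decoded.
 */
#if 0 /* usage sketch only */
DECLINLINE(VBOXSTRICTRC) iemExampleFinishJccRel8(PVMCPU pVCpu, bool fTaken, int8_t i8Disp)
{
    if (fTaken)
        return iemRegRipRelativeJumpS8(pVCpu, i8Disp);  /* may raise #GP(0) */
    iemRegUpdateRipAndClearRF(pVCpu);                   /* just step to the next instruction */
    return VINF_SUCCESS;
}
#endif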
6421
6422/**
6423 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6424 *
6425 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6426 * segment limit.
6427 *
6428 * @returns Strict VBox status code.
6429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6430 * @param offNextInstr The offset of the next instruction.
6431 */
6432IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6433{
6434 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6435 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6436
6437 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6438 if ( uNewIp > pCtx->cs.u32Limit
6439 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6440 return iemRaiseGeneralProtectionFault0(pVCpu);
6441 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6442 pCtx->rip = uNewIp;
6443 pCtx->eflags.Bits.u1RF = 0;
6444
6445#ifndef IEM_WITH_CODE_TLB
6446 /* Flush the prefetch buffer. */
6447 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6448#endif
6449
6450 return VINF_SUCCESS;
6451}
6452
6453
6454/**
6455 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6456 *
6457 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6458 * segment limit.
6459 *
6460 * @returns Strict VBox status code.
6461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6462 * @param offNextInstr The offset of the next instruction.
6463 */
6464IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6465{
6466 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6467 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6468
6469 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6470 {
6471 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6472
6473 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6474 if (uNewEip > pCtx->cs.u32Limit)
6475 return iemRaiseGeneralProtectionFault0(pVCpu);
6476 pCtx->rip = uNewEip;
6477 }
6478 else
6479 {
6480 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6481
6482 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6483 if (!IEM_IS_CANONICAL(uNewRip))
6484 return iemRaiseGeneralProtectionFault0(pVCpu);
6485 pCtx->rip = uNewRip;
6486 }
6487 pCtx->eflags.Bits.u1RF = 0;
6488
6489#ifndef IEM_WITH_CODE_TLB
6490 /* Flush the prefetch buffer. */
6491 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6492#endif
6493
6494 return VINF_SUCCESS;
6495}
6496
6497
6498/**
6499 * Performs a near jump to the specified address.
6500 *
6501 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6502 * segment limit.
6503 *
 * @returns Strict VBox status code.
6504 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6505 * @param uNewRip The new RIP value.
6506 */
6507IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6508{
6509 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6510 switch (pVCpu->iem.s.enmEffOpSize)
6511 {
6512 case IEMMODE_16BIT:
6513 {
6514 Assert(uNewRip <= UINT16_MAX);
6515 if ( uNewRip > pCtx->cs.u32Limit
6516 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6517 return iemRaiseGeneralProtectionFault0(pVCpu);
6518 /** @todo Test 16-bit jump in 64-bit mode. */
6519 pCtx->rip = uNewRip;
6520 break;
6521 }
6522
6523 case IEMMODE_32BIT:
6524 {
6525 Assert(uNewRip <= UINT32_MAX);
6526 Assert(pCtx->rip <= UINT32_MAX);
6527 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6528
6529 if (uNewRip > pCtx->cs.u32Limit)
6530 return iemRaiseGeneralProtectionFault0(pVCpu);
6531 pCtx->rip = uNewRip;
6532 break;
6533 }
6534
6535 case IEMMODE_64BIT:
6536 {
6537 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6538
6539 if (!IEM_IS_CANONICAL(uNewRip))
6540 return iemRaiseGeneralProtectionFault0(pVCpu);
6541 pCtx->rip = uNewRip;
6542 break;
6543 }
6544
6545 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6546 }
6547
6548 pCtx->eflags.Bits.u1RF = 0;
6549
6550#ifndef IEM_WITH_CODE_TLB
6551 /* Flush the prefetch buffer. */
6552 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6553#endif
6554
6555 return VINF_SUCCESS;
6556}
6557
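/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * JMP-to-register tail.  The value handed to iemRegRipJump must already be
 * truncated to the effective operand size, which the asserts in the helper
 * above enforce.
 */
#if 0 /* usage sketch only */
DECLINLINE(VBOXSTRICTRC) iemExampleJmpReg(PVMCPU pVCpu, uint8_t iReg)
{
    switch (pVCpu->iem.s.enmEffOpSize)
    {
        case IEMMODE_16BIT: return iemRegRipJump(pVCpu, iemGRegFetchU16(pVCpu, iReg));
        case IEMMODE_32BIT: return iemRegRipJump(pVCpu, iemGRegFetchU32(pVCpu, iReg));
        case IEMMODE_64BIT: return iemRegRipJump(pVCpu, iemGRegFetchU64(pVCpu, iReg));
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
#endif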
6558
6559/**
6560 * Gets the address of the top of the stack.
6561 *
 * @returns The current stack pointer value (SP, ESP or RSP as appropriate).
6562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6563 * @param pCtx The CPU context from which SP/ESP/RSP should be
6564 * read.
6565 */
6566DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6567{
6568 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6569 return pCtx->rsp;
6570 if (pCtx->ss.Attr.n.u1DefBig)
6571 return pCtx->esp;
6572 return pCtx->sp;
6573}
6574
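/*
 * Note (not part of the original source): the width picked above follows the
 * stack segment attributes, not the operand size.  With SS.D=0 only the low
 * 16 bits of RSP are used, so RSP=0x00012345 yields an effective stack
 * address of 0x2345; with SS.D=1 the full ESP (0x00012345) is used.
 */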
6575
6576/**
6577 * Updates the RIP/EIP/IP to point to the next instruction.
6578 *
6579 * This function leaves the EFLAGS.RF flag alone.
6580 *
6581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6582 * @param cbInstr The number of bytes to add.
6583 */
6584IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6585{
6586 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6587 switch (pVCpu->iem.s.enmCpuMode)
6588 {
6589 case IEMMODE_16BIT:
6590 Assert(pCtx->rip <= UINT16_MAX);
6591 pCtx->eip += cbInstr;
6592 pCtx->eip &= UINT32_C(0xffff);
6593 break;
6594
6595 case IEMMODE_32BIT:
6596 pCtx->eip += cbInstr;
6597 Assert(pCtx->rip <= UINT32_MAX);
6598 break;
6599
6600 case IEMMODE_64BIT:
6601 pCtx->rip += cbInstr;
6602 break;
6603 default: AssertFailed();
6604 }
6605}
6606
6607
6608#if 0
6609/**
6610 * Updates the RIP/EIP/IP to point to the next instruction.
6611 *
6612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6613 */
6614IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6615{
6616 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6617}
6618#endif
6619
6620
6621
6622/**
6623 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6624 *
6625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6626 * @param cbInstr The number of bytes to add.
6627 */
6628IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6629{
6630 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6631
6632 pCtx->eflags.Bits.u1RF = 0;
6633
6634 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6635#if ARCH_BITS >= 64
6636 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6637 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6638 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6639#else
6640 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6641 pCtx->rip += cbInstr;
6642 else
6643 pCtx->eip += cbInstr;
6644#endif
6645}
6646
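/*
 * Note (not part of the original source): on 64-bit hosts the table above
 * folds the three modes into one masked add.  16-bit and 32-bit modes use a
 * 32-bit mask (an EIP-style update) while 64-bit mode keeps the whole RIP:
 *     IEMMODE_32BIT: (0xfffffffe + 2) & 0xffffffff -> 0x00000000
 *     IEMMODE_64BIT: (0xfffffffe + 2) & UINT64_MAX -> 0x100000000
 */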
6647
6648/**
6649 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6650 *
6651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6652 */
6653IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6654{
6655 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6656}
6657
6658
6659/**
6660 * Adds to the stack pointer.
6661 *
6662 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6663 * @param pCtx The CPU context which SP/ESP/RSP should be
6664 * updated.
6665 * @param cbToAdd The number of bytes to add (8-bit!).
6666 */
6667DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6668{
6669 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6670 pCtx->rsp += cbToAdd;
6671 else if (pCtx->ss.Attr.n.u1DefBig)
6672 pCtx->esp += cbToAdd;
6673 else
6674 pCtx->sp += cbToAdd;
6675}
6676
6677
6678/**
6679 * Subtracts from the stack pointer.
6680 *
6681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6682 * @param pCtx The CPU context which SP/ESP/RSP should be
6683 * updated.
6684 * @param cbToSub The number of bytes to subtract (8-bit!).
6685 */
6686DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6687{
6688 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6689 pCtx->rsp -= cbToSub;
6690 else if (pCtx->ss.Attr.n.u1DefBig)
6691 pCtx->esp -= cbToSub;
6692 else
6693 pCtx->sp -= cbToSub;
6694}
6695
6696
6697/**
6698 * Adds to the temporary stack pointer.
6699 *
6700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6701 * @param pCtx Where to get the current stack mode.
6702 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6703 * @param cbToAdd The number of bytes to add (16-bit).
6704 */
6705DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6706{
6707 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6708 pTmpRsp->u += cbToAdd;
6709 else if (pCtx->ss.Attr.n.u1DefBig)
6710 pTmpRsp->DWords.dw0 += cbToAdd;
6711 else
6712 pTmpRsp->Words.w0 += cbToAdd;
6713}
6714
6715
6716/**
6717 * Subtracts from the temporary stack pointer.
6718 *
6719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6720 * @param pCtx Where to get the current stack mode.
6721 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6722 * @param cbToSub The number of bytes to subtract.
6723 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6724 * expecting that.
6725 */
6726DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6727{
6728 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6729 pTmpRsp->u -= cbToSub;
6730 else if (pCtx->ss.Attr.n.u1DefBig)
6731 pTmpRsp->DWords.dw0 -= cbToSub;
6732 else
6733 pTmpRsp->Words.w0 -= cbToSub;
6734}
6735
6736
6737/**
6738 * Calculates the effective stack address for a push of the specified size as
6739 * well as the new RSP value (upper bits may be masked).
6740 *
6741 * @returns Effective stack address for the push.
6742 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6743 * @param pCtx Where to get the current stack mode.
6744 * @param cbItem The size of the stack item to push.
6745 * @param puNewRsp Where to return the new RSP value.
6746 */
6747DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6748{
6749 RTUINT64U uTmpRsp;
6750 RTGCPTR GCPtrTop;
6751 uTmpRsp.u = pCtx->rsp;
6752
6753 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6754 GCPtrTop = uTmpRsp.u -= cbItem;
6755 else if (pCtx->ss.Attr.n.u1DefBig)
6756 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6757 else
6758 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6759 *puNewRsp = uTmpRsp.u;
6760 return GCPtrTop;
6761}
6762
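/*
 * Illustrative sketch (not part of the original source): the push helpers
 * follow a "compute first, commit last" pattern so RSP is only updated once
 * the memory write has succeeded.  iemExamplePushU16 and the store helper it
 * calls are hypothetical placeholders, not the real IEM push implementation.
 */
#if 0 /* usage sketch only */
IEM_STATIC VBOXSTRICTRC iemExamplePushU16(PVMCPU pVCpu, uint16_t u16Value)
{
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop  = iemRegGetRspForPush(pVCpu, pCtx, sizeof(u16Value), &uNewRsp);
    VBOXSTRICTRC rcStrict = iemExampleStoreStackU16(pVCpu, GCPtrTop, u16Value); /* hypothetical store helper */
    if (rcStrict == VINF_SUCCESS)
        pCtx->rsp = uNewRsp; /* commit the new stack pointer only on success */
    return rcStrict;
}
#endif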
6763
6764/**
6765 * Gets the current stack pointer and calculates the value after a pop of the
6766 * specified size.
6767 *
6768 * @returns Current stack pointer.
6769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6770 * @param pCtx Where to get the current stack mode.
6771 * @param cbItem The size of the stack item to pop.
6772 * @param puNewRsp Where to return the new RSP value.
6773 */
6774DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6775{
6776 RTUINT64U uTmpRsp;
6777 RTGCPTR GCPtrTop;
6778 uTmpRsp.u = pCtx->rsp;
6779
6780 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6781 {
6782 GCPtrTop = uTmpRsp.u;
6783 uTmpRsp.u += cbItem;
6784 }
6785 else if (pCtx->ss.Attr.n.u1DefBig)
6786 {
6787 GCPtrTop = uTmpRsp.DWords.dw0;
6788 uTmpRsp.DWords.dw0 += cbItem;
6789 }
6790 else
6791 {
6792 GCPtrTop = uTmpRsp.Words.w0;
6793 uTmpRsp.Words.w0 += cbItem;
6794 }
6795 *puNewRsp = uTmpRsp.u;
6796 return GCPtrTop;
6797}
6798
6799
6800/**
6801 * Calculates the effective stack address for a push of the specified size as
6802 * well as the new temporary RSP value (upper bits may be masked).
6803 *
6804 * @returns Effective stack address for the push.
6805 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6806 * @param pCtx Where to get the current stack mode.
6807 * @param pTmpRsp The temporary stack pointer. This is updated.
6808 * @param cbItem The size of the stack item to push.
6809 */
6810DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6811{
6812 RTGCPTR GCPtrTop;
6813
6814 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6815 GCPtrTop = pTmpRsp->u -= cbItem;
6816 else if (pCtx->ss.Attr.n.u1DefBig)
6817 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6818 else
6819 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6820 return GCPtrTop;
6821}
6822
6823
6824/**
6825 * Gets the effective stack address for a pop of the specified size and
6826 * calculates and updates the temporary RSP.
6827 *
6828 * @returns Current stack pointer.
6829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6830 * @param pCtx Where to get the current stack mode.
6831 * @param pTmpRsp The temporary stack pointer. This is updated.
6832 * @param cbItem The size of the stack item to pop.
6833 */
6834DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6835{
6836 RTGCPTR GCPtrTop;
6837 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6838 {
6839 GCPtrTop = pTmpRsp->u;
6840 pTmpRsp->u += cbItem;
6841 }
6842 else if (pCtx->ss.Attr.n.u1DefBig)
6843 {
6844 GCPtrTop = pTmpRsp->DWords.dw0;
6845 pTmpRsp->DWords.dw0 += cbItem;
6846 }
6847 else
6848 {
6849 GCPtrTop = pTmpRsp->Words.w0;
6850 pTmpRsp->Words.w0 += cbItem;
6851 }
6852 return GCPtrTop;
6853}
6854
6855/** @} */
6856
6857
6858/** @name FPU access and helpers.
6859 *
6860 * @{
6861 */
6862
6863
6864/**
6865 * Hook for preparing to use the host FPU.
6866 *
6867 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6868 *
6869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6870 */
6871DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6872{
6873#ifdef IN_RING3
6874 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6875#else
6876 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6877#endif
6878}
6879
6880
6881/**
6882 * Hook for preparing to use the host FPU for SSE.
6883 *
6884 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6885 *
6886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6887 */
6888DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6889{
6890 iemFpuPrepareUsage(pVCpu);
6891}
6892
6893
6894/**
6895 * Hook for preparing to use the host FPU for AVX.
6896 *
6897 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6898 *
6899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6900 */
6901DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6902{
6903 iemFpuPrepareUsage(pVCpu);
6904}
6905
6906
6907/**
6908 * Hook for actualizing the guest FPU state before the interpreter reads it.
6909 *
6910 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6911 *
6912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6913 */
6914DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6915{
6916#ifdef IN_RING3
6917 NOREF(pVCpu);
6918#else
6919 CPUMRZFpuStateActualizeForRead(pVCpu);
6920#endif
6921}
6922
6923
6924/**
6925 * Hook for actualizing the guest FPU state before the interpreter changes it.
6926 *
6927 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6928 *
6929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6930 */
6931DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6932{
6933#ifdef IN_RING3
6934 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6935#else
6936 CPUMRZFpuStateActualizeForChange(pVCpu);
6937#endif
6938}
6939
6940
6941/**
6942 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6943 * only.
6944 *
6945 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6946 *
6947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6948 */
6949DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6950{
6951#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6952 NOREF(pVCpu);
6953#else
6954 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6955#endif
6956}
6957
6958
6959/**
6960 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6961 * read+write.
6962 *
6963 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6964 *
6965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6966 */
6967DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6968{
6969#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6970 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6971#else
6972 CPUMRZFpuStateActualizeForChange(pVCpu);
6973#endif
6974}
6975
6976
6977/**
6978 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6979 * only.
6980 *
6981 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6982 *
6983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6984 */
6985DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6986{
6987#ifdef IN_RING3
6988 NOREF(pVCpu);
6989#else
6990 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6991#endif
6992}
6993
6994
6995/**
6996 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6997 * read+write.
6998 *
6999 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7000 *
7001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7002 */
7003DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7004{
7005#ifdef IN_RING3
7006 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7007#else
7008 CPUMRZFpuStateActualizeForChange(pVCpu);
7009#endif
7010}
7011
7012
7013/**
7014 * Stores a QNaN value into a FPU register.
7015 *
7016 * @param pReg Pointer to the register.
7017 */
7018DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7019{
7020 pReg->au32[0] = UINT32_C(0x00000000);
7021 pReg->au32[1] = UINT32_C(0xc0000000);
7022 pReg->au16[4] = UINT16_C(0xffff);
7023}
7024
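/*
 * Note (not part of the original source): the pattern written above is the
 * x87 "real indefinite" QNaN: sign set, exponent all ones, and only the two
 * top mantissa bits set.  A minimal self-check sketch:
 */
#if 0 /* illustration only */
DECLINLINE(void) iemExampleCheckQNanEncoding(void)
{
    RTFLOAT80U r80Example;
    iemFpuStoreQNan(&r80Example);
    Assert(r80Example.s.uExponent   == 0x7fff);                       /* exponent all ones */
    Assert(r80Example.s.u64Mantissa == UINT64_C(0xc000000000000000)); /* J-bit + top fraction bit */
}
#endif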
7025
7026/**
7027 * Updates the FOP, FPU.CS and FPUIP registers.
7028 *
7029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7030 * @param pCtx The CPU context.
7031 * @param pFpuCtx The FPU context.
7032 */
7033DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
7034{
7035 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7036 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7037 /** @todo x87.CS and FPUIP need to be kept separately. */
7038 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7039 {
7040 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7041 * happens in real mode here based on the fnsave and fnstenv images. */
7042 pFpuCtx->CS = 0;
7043 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
7044 }
7045 else
7046 {
7047 pFpuCtx->CS = pCtx->cs.Sel;
7048 pFpuCtx->FPUIP = pCtx->rip;
7049 }
7050}
7051
7052
7053/**
7054 * Updates the x87.DS and FPUDP registers.
7055 *
7056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7057 * @param pCtx The CPU context.
7058 * @param pFpuCtx The FPU context.
7059 * @param iEffSeg The effective segment register.
7060 * @param GCPtrEff The effective address relative to @a iEffSeg.
7061 */
7062DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7063{
7064 RTSEL sel;
7065 switch (iEffSeg)
7066 {
7067 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
7068 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
7069 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
7070 case X86_SREG_ES: sel = pCtx->es.Sel; break;
7071 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
7072 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
7073 default:
7074 AssertMsgFailed(("%d\n", iEffSeg));
7075 sel = pCtx->ds.Sel;
7076 }
7077 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7078 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7079 {
7080 pFpuCtx->DS = 0;
7081 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7082 }
7083 else
7084 {
7085 pFpuCtx->DS = sel;
7086 pFpuCtx->FPUDP = GCPtrEff;
7087 }
7088}
7089
7090
7091/**
7092 * Rotates the stack registers in the push direction.
7093 *
7094 * @param pFpuCtx The FPU context.
7095 * @remarks This is a complete waste of time, but fxsave stores the registers in
7096 * stack order.
7097 */
7098DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7099{
7100 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7101 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7102 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7103 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7104 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7105 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7106 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7107 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7108 pFpuCtx->aRegs[0].r80 = r80Tmp;
7109}
7110
7111
7112/**
7113 * Rotates the stack registers in the pop direction.
7114 *
7115 * @param pFpuCtx The FPU context.
7116 * @remarks This is a complete waste of time, but fxsave stores the registers in
7117 * stack order.
7118 */
7119DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7120{
7121 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7122 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7123 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7124 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7125 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7126 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7127 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7128 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7129 pFpuCtx->aRegs[7].r80 = r80Tmp;
7130}
7131
7132
7133/**
7134 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7135 * exception prevents it.
7136 *
7137 * @param pResult The FPU operation result to push.
7138 * @param pFpuCtx The FPU context.
7139 */
7140IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7141{
7142 /* Update FSW and bail if there are pending exceptions afterwards. */
7143 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7144 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7145 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7146 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7147 {
7148 pFpuCtx->FSW = fFsw;
7149 return;
7150 }
7151
7152 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7153 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7154 {
7155 /* All is fine, push the actual value. */
7156 pFpuCtx->FTW |= RT_BIT(iNewTop);
7157 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7158 }
7159 else if (pFpuCtx->FCW & X86_FCW_IM)
7160 {
7161 /* Masked stack overflow, push QNaN. */
7162 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7163 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7164 }
7165 else
7166 {
7167 /* Raise stack overflow, don't push anything. */
7168 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7169 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7170 return;
7171 }
7172
7173 fFsw &= ~X86_FSW_TOP_MASK;
7174 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7175 pFpuCtx->FSW = fFsw;
7176
7177 iemFpuRotateStackPush(pFpuCtx);
7178}
7179
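/*
 * Note (not part of the original source): the "+ 7" in the TOP calculation
 * above is a modulo-8 decrement, i.e. a push moves TOP one slot down:
 *     TOP=0 -> iNewTop=(0+7)&7=7,   TOP=3 -> iNewTop=(3+7)&7=2
 * The pop side (iemFpuMaybePopOne, iemFpuStackIncTop) adds 1 (or 9, which is
 * the same thing modulo 8) to move TOP back up.
 */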
7180
7181/**
7182 * Stores a result in a FPU register and updates the FSW and FTW.
7183 *
7184 * @param pFpuCtx The FPU context.
7185 * @param pResult The result to store.
7186 * @param iStReg Which FPU register to store it in.
7187 */
7188IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7189{
7190 Assert(iStReg < 8);
7191 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7192 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7193 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7194 pFpuCtx->FTW |= RT_BIT(iReg);
7195 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7196}
7197
7198
7199/**
7200 * Only updates the FPU status word (FSW) with the result of the current
7201 * instruction.
7202 *
7203 * @param pFpuCtx The FPU context.
7204 * @param u16FSW The FSW output of the current instruction.
7205 */
7206IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7207{
7208 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7209 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7210}
7211
7212
7213/**
7214 * Pops one item off the FPU stack if no pending exception prevents it.
7215 *
7216 * @param pFpuCtx The FPU context.
7217 */
7218IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7219{
7220 /* Check pending exceptions. */
7221 uint16_t uFSW = pFpuCtx->FSW;
7222 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7223 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7224 return;
7225
7226 /* TOP--. */
7227 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7228 uFSW &= ~X86_FSW_TOP_MASK;
7229 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7230 pFpuCtx->FSW = uFSW;
7231
7232 /* Mark the previous ST0 as empty. */
7233 iOldTop >>= X86_FSW_TOP_SHIFT;
7234 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7235
7236 /* Rotate the registers. */
7237 iemFpuRotateStackPop(pFpuCtx);
7238}
7239
7240
7241/**
7242 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7243 *
7244 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7245 * @param pResult The FPU operation result to push.
7246 */
7247IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7248{
7249 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7250 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7251 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7252 iemFpuMaybePushResult(pResult, pFpuCtx);
7253}
7254
7255
7256/**
7257 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7258 * and sets FPUDP and FPUDS.
7259 *
7260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7261 * @param pResult The FPU operation result to push.
7262 * @param iEffSeg The effective segment register.
7263 * @param GCPtrEff The effective address relative to @a iEffSeg.
7264 */
7265IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7266{
7267 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7268 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7269 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7270 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7271 iemFpuMaybePushResult(pResult, pFpuCtx);
7272}
7273
7274
7275/**
7276 * Replace ST0 with the first value and push the second onto the FPU stack,
7277 * unless a pending exception prevents it.
7278 *
7279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7280 * @param pResult The FPU operation result to store and push.
7281 */
7282IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7283{
7284 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7285 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7286 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7287
7288 /* Update FSW and bail if there are pending exceptions afterwards. */
7289 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7290 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7291 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7292 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7293 {
7294 pFpuCtx->FSW = fFsw;
7295 return;
7296 }
7297
7298 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7299 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7300 {
7301 /* All is fine, push the actual value. */
7302 pFpuCtx->FTW |= RT_BIT(iNewTop);
7303 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7304 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7305 }
7306 else if (pFpuCtx->FCW & X86_FCW_IM)
7307 {
7308 /* Masked stack overflow, push QNaN. */
7309 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7310 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7311 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7312 }
7313 else
7314 {
7315 /* Raise stack overflow, don't push anything. */
7316 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7317 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7318 return;
7319 }
7320
7321 fFsw &= ~X86_FSW_TOP_MASK;
7322 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7323 pFpuCtx->FSW = fFsw;
7324
7325 iemFpuRotateStackPush(pFpuCtx);
7326}
7327
7328
7329/**
7330 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7331 * FOP.
7332 *
7333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7334 * @param pResult The result to store.
7335 * @param iStReg Which FPU register to store it in.
7336 */
7337IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7338{
7339 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7340 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7341 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7342 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7343}
7344
7345
7346/**
7347 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7348 * FOP, and then pops the stack.
7349 *
7350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7351 * @param pResult The result to store.
7352 * @param iStReg Which FPU register to store it in.
7353 */
7354IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7355{
7356 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7357 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7358 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7359 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7360 iemFpuMaybePopOne(pFpuCtx);
7361}
7362
7363
7364/**
7365 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7366 * FPUDP, and FPUDS.
7367 *
7368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7369 * @param pResult The result to store.
7370 * @param iStReg Which FPU register to store it in.
7371 * @param iEffSeg The effective memory operand selector register.
7372 * @param GCPtrEff The effective memory operand offset.
7373 */
7374IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7375 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7376{
7377 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7378 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7379 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7380 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7381 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7382}
7383
7384
7385/**
7386 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7387 * FPUDP, and FPUDS, and then pops the stack.
7388 *
7389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7390 * @param pResult The result to store.
7391 * @param iStReg Which FPU register to store it in.
7392 * @param iEffSeg The effective memory operand selector register.
7393 * @param GCPtrEff The effective memory operand offset.
7394 */
7395IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7396 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7397{
7398 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7399 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7400 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7401 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7402 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7403 iemFpuMaybePopOne(pFpuCtx);
7404}
7405
7406
7407/**
7408 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7409 *
7410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7411 */
7412IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7413{
7414 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7415 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7416 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7417}
7418
7419
7420/**
7421 * Marks the specified stack register as free (for FFREE).
7422 *
7423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7424 * @param iStReg The register to free.
7425 */
7426IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7427{
7428 Assert(iStReg < 8);
7429 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7430 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7431 pFpuCtx->FTW &= ~RT_BIT(iReg);
7432}
7433
7434
7435/**
7436 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7437 *
7438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7439 */
7440IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7441{
7442 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7443 uint16_t uFsw = pFpuCtx->FSW;
7444 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7445 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7446 uFsw &= ~X86_FSW_TOP_MASK;
7447 uFsw |= uTop;
7448 pFpuCtx->FSW = uFsw;
7449}
7450
7451
7452/**
7453 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7454 *
7455 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7456 */
7457IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7458{
7459 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7460 uint16_t uFsw = pFpuCtx->FSW;
7461 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7462 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7463 uFsw &= ~X86_FSW_TOP_MASK;
7464 uFsw |= uTop;
7465 pFpuCtx->FSW = uFsw;
7466}
7467
7468
7469/**
7470 * Updates the FSW, FOP, FPUIP, and FPUCS.
7471 *
7472 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7473 * @param u16FSW The FSW from the current instruction.
7474 */
7475IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7476{
7477 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7478 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7479 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7480 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7481}
7482
7483
7484/**
7485 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7486 *
7487 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7488 * @param u16FSW The FSW from the current instruction.
7489 */
7490IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7491{
7492 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7493 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7494 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7495 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7496 iemFpuMaybePopOne(pFpuCtx);
7497}
7498
7499
7500/**
7501 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7502 *
7503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7504 * @param u16FSW The FSW from the current instruction.
7505 * @param iEffSeg The effective memory operand selector register.
7506 * @param GCPtrEff The effective memory operand offset.
7507 */
7508IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7509{
7510 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7511 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7512 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7513 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7514 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7515}
7516
7517
7518/**
7519 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7520 *
7521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7522 * @param u16FSW The FSW from the current instruction.
7523 */
7524IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7525{
7526 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7527 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7528 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7529 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7530 iemFpuMaybePopOne(pFpuCtx);
7531 iemFpuMaybePopOne(pFpuCtx);
7532}
7533
7534
7535/**
7536 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7537 *
7538 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7539 * @param u16FSW The FSW from the current instruction.
7540 * @param iEffSeg The effective memory operand selector register.
7541 * @param GCPtrEff The effective memory operand offset.
7542 */
7543IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7544{
7545 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7546 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7547 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7548 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7549 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7550 iemFpuMaybePopOne(pFpuCtx);
7551}
7552
7553
7554/**
7555 * Worker routine for raising an FPU stack underflow exception.
7556 *
7557 * @param pFpuCtx The FPU context.
7558 * @param iStReg The stack register being accessed.
7559 */
7560IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7561{
7562 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7563 if (pFpuCtx->FCW & X86_FCW_IM)
7564 {
7565 /* Masked underflow. */
7566 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7567 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7568 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7569 if (iStReg != UINT8_MAX)
7570 {
7571 pFpuCtx->FTW |= RT_BIT(iReg);
7572 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7573 }
7574 }
7575 else
7576 {
7577 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7578 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7579 }
7580}
7581
7582
7583/**
7584 * Raises a FPU stack underflow exception.
7585 *
7586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7587 * @param iStReg The destination register that should be loaded
7588 * with QNaN if \#IS is not masked. Specify
7589 * UINT8_MAX if none (like for fcom).
7590 */
7591DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7592{
7593 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7594 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7595 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7596 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7597}
7598
7599
7600DECL_NO_INLINE(IEM_STATIC, void)
7601iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7602{
7603 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7604 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7605 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7606 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7607 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7608}
7609
7610
7611DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7612{
7613 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7614 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7615 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7616 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7617 iemFpuMaybePopOne(pFpuCtx);
7618}
7619
7620
7621DECL_NO_INLINE(IEM_STATIC, void)
7622iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7623{
7624 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7625 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7626 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7627 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7628 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7629 iemFpuMaybePopOne(pFpuCtx);
7630}
7631
7632
7633DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7634{
7635 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7636 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7637 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7638 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7639 iemFpuMaybePopOne(pFpuCtx);
7640 iemFpuMaybePopOne(pFpuCtx);
7641}
7642
7643
7644DECL_NO_INLINE(IEM_STATIC, void)
7645iemFpuStackPushUnderflow(PVMCPU pVCpu)
7646{
7647 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7648 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7649 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7650
7651 if (pFpuCtx->FCW & X86_FCW_IM)
7652 {
7653 /* Masked underflow - Push QNaN. */
7654 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7655 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7656 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7657 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7658 pFpuCtx->FTW |= RT_BIT(iNewTop);
7659 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7660 iemFpuRotateStackPush(pFpuCtx);
7661 }
7662 else
7663 {
7664 /* Exception pending - don't change TOP or the register stack. */
7665 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7666 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7667 }
7668}
7669
7670
7671DECL_NO_INLINE(IEM_STATIC, void)
7672iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7673{
7674 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7675 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7676 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7677
7678 if (pFpuCtx->FCW & X86_FCW_IM)
7679 {
7680 /* Masked underflow - Push QNaN. */
7681 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7682 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7683 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7684 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7685 pFpuCtx->FTW |= RT_BIT(iNewTop);
7686 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7687 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7688 iemFpuRotateStackPush(pFpuCtx);
7689 }
7690 else
7691 {
7692 /* Exception pending - don't change TOP or the register stack. */
7693 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7694 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7695 }
7696}
7697
7698
7699/**
7700 * Worker routine for raising an FPU stack overflow exception on a push.
7701 *
7702 * @param pFpuCtx The FPU context.
7703 */
7704IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7705{
7706 if (pFpuCtx->FCW & X86_FCW_IM)
7707 {
7708 /* Masked overflow. */
7709 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7710 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7711 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7712 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7713 pFpuCtx->FTW |= RT_BIT(iNewTop);
7714 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7715 iemFpuRotateStackPush(pFpuCtx);
7716 }
7717 else
7718 {
7719 /* Exception pending - don't change TOP or the register stack. */
7720 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7721 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7722 }
7723}
7724
7725
7726/**
7727 * Raises a FPU stack overflow exception on a push.
7728 *
7729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7730 */
7731DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7732{
7733 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7734 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7735 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7736 iemFpuStackPushOverflowOnly(pFpuCtx);
7737}
7738
7739
7740/**
7741 * Raises a FPU stack overflow exception on a push with a memory operand.
7742 *
7743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7744 * @param iEffSeg The effective memory operand selector register.
7745 * @param GCPtrEff The effective memory operand offset.
7746 */
7747DECL_NO_INLINE(IEM_STATIC, void)
7748iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7749{
7750 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7751 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7752 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7753 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7754 iemFpuStackPushOverflowOnly(pFpuCtx);
7755}
7756
7757
7758IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7759{
7760 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7761 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7762 if (pFpuCtx->FTW & RT_BIT(iReg))
7763 return VINF_SUCCESS;
7764 return VERR_NOT_FOUND;
7765}
7766
7767
7768IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7769{
7770 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7771 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7772 if (pFpuCtx->FTW & RT_BIT(iReg))
7773 {
7774 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7775 return VINF_SUCCESS;
7776 }
7777 return VERR_NOT_FOUND;
7778}
7779
7780
7781IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7782 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7783{
7784 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7785 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7786 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7787 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7788 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7789 {
7790 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7791 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7792 return VINF_SUCCESS;
7793 }
7794 return VERR_NOT_FOUND;
7795}
7796
7797
7798IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7799{
7800 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7801 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7802 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7803 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7804 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7805 {
7806 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7807 return VINF_SUCCESS;
7808 }
7809 return VERR_NOT_FOUND;
7810}
7811
7812
7813/**
7814 * Updates the FPU exception status after FCW is changed.
7815 *
7816 * @param pFpuCtx The FPU context.
7817 */
7818IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7819{
7820 uint16_t u16Fsw = pFpuCtx->FSW;
7821 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7822 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7823 else
7824 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7825 pFpuCtx->FSW = u16Fsw;
7826}
7827
7828
7829/**
7830 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7831 *
7832 * @returns The full FTW.
7833 * @param pFpuCtx The FPU context.
7834 */
7835IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7836{
7837 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7838 uint16_t u16Ftw = 0;
7839 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7840 for (unsigned iSt = 0; iSt < 8; iSt++)
7841 {
7842 unsigned const iReg = (iSt + iTop) & 7;
7843 if (!(u8Ftw & RT_BIT(iReg)))
7844 u16Ftw |= 3 << (iReg * 2); /* empty */
7845 else
7846 {
7847 uint16_t uTag;
7848 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7849 if (pr80Reg->s.uExponent == 0x7fff)
7850 uTag = 2; /* Exponent is all 1's => Special. */
7851 else if (pr80Reg->s.uExponent == 0x0000)
7852 {
7853 if (pr80Reg->s.u64Mantissa == 0x0000)
7854 uTag = 1; /* All bits are zero => Zero. */
7855 else
7856 uTag = 2; /* Must be special. */
7857 }
7858 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7859 uTag = 0; /* Valid. */
7860 else
7861 uTag = 2; /* Must be special. */
7862
7863 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7864 }
7865 }
7866
7867 return u16Ftw;
7868}
7869
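/*
 * Note (not part of the original source): the 2-bit tags produced above use
 * the standard x87 tag word encoding:
 *     00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal), 11 = empty
 * so with every register empty (abridged FTW = 0) the full FTW comes out as
 * 0xffff, matching the value FNINIT establishes.
 */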
7870
7871/**
7872 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7873 *
7874 * @returns The compressed FTW.
7875 * @param u16FullFtw The full FTW to convert.
7876 */
7877IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7878{
7879 uint8_t u8Ftw = 0;
7880 for (unsigned i = 0; i < 8; i++)
7881 {
7882 if ((u16FullFtw & 3) != 3 /*empty*/)
7883 u8Ftw |= RT_BIT(i);
7884 u16FullFtw >>= 2;
7885 }
7886
7887 return u8Ftw;
7888}
7889
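/*
 * Illustrative sketch (not part of the original source): expanding and then
 * re-compressing the tag word must reproduce the abridged FTW kept in the
 * FXSAVE image, since every non-empty tag (0, 1 or 2) maps back to a set bit.
 */
#if 0 /* illustration only */
DECLINLINE(void) iemExampleFtwRoundTrip(PCX86FXSTATE pFpuCtx)
{
    Assert(iemFpuCompressFtw(iemFpuCalcFullFtw(pFpuCtx)) == (uint8_t)pFpuCtx->FTW);
}
#endif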
7890/** @} */
7891
7892
7893/** @name Memory access.
7894 *
7895 * @{
7896 */
7897
7898
7899/**
7900 * Updates the IEMCPU::cbWritten counter if applicable.
7901 *
7902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7903 * @param fAccess The access being accounted for.
7904 * @param cbMem The access size.
7905 */
7906DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7907{
7908 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7909 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7910 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7911}
7912
7913
7914/**
7915 * Checks if the given segment can be written to, raise the appropriate
7916 * exception if not.
7917 *
7918 * @returns VBox strict status code.
7919 *
7920 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7921 * @param pHid Pointer to the hidden register.
7922 * @param iSegReg The register number.
7923 * @param pu64BaseAddr Where to return the base address to use for the
7924 * segment. (In 64-bit code it may differ from the
7925 * base in the hidden segment.)
7926 */
7927IEM_STATIC VBOXSTRICTRC
7928iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7929{
7930 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7931 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7932 else
7933 {
7934 if (!pHid->Attr.n.u1Present)
7935 {
7936 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7937 AssertRelease(uSel == 0);
7938 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7939 return iemRaiseGeneralProtectionFault0(pVCpu);
7940 }
7941
7942 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7943 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7944 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7945 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7946 *pu64BaseAddr = pHid->u64Base;
7947 }
7948 return VINF_SUCCESS;
7949}
7950
7951
7952/**
7953 * Checks if the given segment can be read from, raising the appropriate
7954 * exception if not.
7955 *
7956 * @returns VBox strict status code.
7957 *
7958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7959 * @param pHid Pointer to the hidden register.
7960 * @param iSegReg The register number.
7961 * @param pu64BaseAddr Where to return the base address to use for the
7962 * segment. (In 64-bit code it may differ from the
7963 * base in the hidden segment.)
7964 */
7965IEM_STATIC VBOXSTRICTRC
7966iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7967{
7968 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7969 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7970 else
7971 {
7972 if (!pHid->Attr.n.u1Present)
7973 {
7974 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7975 AssertRelease(uSel == 0);
7976 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7977 return iemRaiseGeneralProtectionFault0(pVCpu);
7978 }
7979
7980 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7981 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7982 *pu64BaseAddr = pHid->u64Base;
7983 }
7984 return VINF_SUCCESS;
7985}
7986
7987
7988/**
7989 * Applies the segment limit, base and attributes.
7990 *
7991 * This may raise a \#GP or \#SS.
7992 *
7993 * @returns VBox strict status code.
7994 *
7995 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7996 * @param fAccess The kind of access which is being performed.
7997 * @param iSegReg The index of the segment register to apply.
7998 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7999 * TSS, ++).
8000 * @param cbMem The access size.
8001 * @param pGCPtrMem Pointer to the guest memory address to apply
8002 * segmentation to. Input and output parameter.
8003 */
8004IEM_STATIC VBOXSTRICTRC
8005iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8006{
8007 if (iSegReg == UINT8_MAX)
8008 return VINF_SUCCESS;
8009
8010 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8011 switch (pVCpu->iem.s.enmCpuMode)
8012 {
8013 case IEMMODE_16BIT:
8014 case IEMMODE_32BIT:
8015 {
8016 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8017 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8018
8019 if ( pSel->Attr.n.u1Present
8020 && !pSel->Attr.n.u1Unusable)
8021 {
8022 Assert(pSel->Attr.n.u1DescType);
8023 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8024 {
8025 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8026 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8027 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8028
8029 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8030 {
8031 /** @todo CPL check. */
8032 }
8033
8034 /*
8035 * There are two kinds of data selectors, normal and expand down.
8036 */
8037 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8038 {
8039 if ( GCPtrFirst32 > pSel->u32Limit
8040 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8041 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8042 }
8043 else
8044 {
8045 /*
8046 * The upper boundary is defined by the B bit, not the G bit!
8047 */
8048 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8049 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8050 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8051 }
8052 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8053 }
8054 else
8055 {
8056
8057 /*
8058                     * A code selector can usually be used to read through, but writing is
8059 * only permitted in real and V8086 mode.
8060 */
8061 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8062 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8063 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8064 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8065 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8066
8067 if ( GCPtrFirst32 > pSel->u32Limit
8068 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8069 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8070
8071 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8072 {
8073 /** @todo CPL check. */
8074 }
8075
8076 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8077 }
8078 }
8079 else
8080 return iemRaiseGeneralProtectionFault0(pVCpu);
8081 return VINF_SUCCESS;
8082 }
8083
8084 case IEMMODE_64BIT:
8085 {
8086 RTGCPTR GCPtrMem = *pGCPtrMem;
8087 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8088 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8089
8090 Assert(cbMem >= 1);
8091 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8092 return VINF_SUCCESS;
8093 return iemRaiseGeneralProtectionFault0(pVCpu);
8094 }
8095
8096 default:
8097 AssertFailedReturn(VERR_IEM_IPE_7);
8098 }
8099}
8100
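/*
 * Worked example (illustrative): for an expand-down data segment with limit
 * 0x0fff and the B bit set, the function above accepts offsets 0x1000 thru
 * 0xffffffff and raises a fault via iemRaiseSelectorBounds for an access
 * starting at, say, 0x0800, since valid offsets lie strictly above the limit.
 */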
8101
8102/**
8103 * Translates a virtual address to a physical address and checks if we
8104 * can access the page as specified.
8105 *
8106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8107 * @param GCPtrMem The virtual address.
8108 * @param fAccess The intended access.
8109 * @param pGCPhysMem Where to return the physical address.
8110 */
8111IEM_STATIC VBOXSTRICTRC
8112iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8113{
8114 /** @todo Need a different PGM interface here. We're currently using
8115 * generic / REM interfaces. this won't cut it for R0 & RC. */
8116 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8117 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8118 RTGCPHYS GCPhys;
8119 uint64_t fFlags;
8120 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8121 if (RT_FAILURE(rc))
8122 {
8123 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8124 /** @todo Check unassigned memory in unpaged mode. */
8125 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8126 *pGCPhysMem = NIL_RTGCPHYS;
8127 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8128 }
8129
8130 /* If the page is writable and does not have the no-exec bit set, all
8131 access is allowed. Otherwise we'll have to check more carefully... */
8132 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8133 {
8134 /* Write to read only memory? */
8135 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8136 && !(fFlags & X86_PTE_RW)
8137 && ( (pVCpu->iem.s.uCpl == 3
8138 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8139 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8140 {
8141 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8142 *pGCPhysMem = NIL_RTGCPHYS;
8143 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8144 }
8145
8146 /* Kernel memory accessed by userland? */
8147 if ( !(fFlags & X86_PTE_US)
8148 && pVCpu->iem.s.uCpl == 3
8149 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8150 {
8151 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8152 *pGCPhysMem = NIL_RTGCPHYS;
8153 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8154 }
8155
8156 /* Executing non-executable memory? */
8157 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8158 && (fFlags & X86_PTE_PAE_NX)
8159 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8160 {
8161 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8162 *pGCPhysMem = NIL_RTGCPHYS;
8163 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8164 VERR_ACCESS_DENIED);
8165 }
8166 }
8167
8168 /*
8169 * Set the dirty / access flags.
8170     * ASSUMES this is set when the address is translated rather than on commit...
8171 */
8172 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8173 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8174 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8175 {
8176 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8177 AssertRC(rc2);
8178 }
8179
8180 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8181 *pGCPhysMem = GCPhys;
8182 return VINF_SUCCESS;
8183}
8184
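/*
 * Worked example (illustrative): a CPL-0 write to a page with X86_PTE_RW clear
 * is let through by the function above when CR0.WP is 0, but takes the #PF
 * path when CR0.WP is 1.  A CPL-3 write to such a page faults regardless of
 * CR0.WP, unless the access is flagged IEM_ACCESS_WHAT_SYS.
 */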
8185
8186
8187/**
8188 * Maps a physical page.
8189 *
8190 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8192 * @param GCPhysMem The physical address.
8193 * @param fAccess The intended access.
8194 * @param ppvMem Where to return the mapping address.
8195 * @param pLock The PGM lock.
8196 */
8197IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8198{
8199#ifdef IEM_VERIFICATION_MODE_FULL
8200 /* Force the alternative path so we can ignore writes. */
8201 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8202 {
8203 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8204 {
8205 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8206 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8207 if (RT_FAILURE(rc2))
8208 pVCpu->iem.s.fProblematicMemory = true;
8209 }
8210 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8211 }
8212#endif
8213#ifdef IEM_LOG_MEMORY_WRITES
8214 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8215 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8216#endif
8217#ifdef IEM_VERIFICATION_MODE_MINIMAL
8218 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8219#endif
8220
8221 /** @todo This API may require some improving later. A private deal with PGM
8222     *        regarding locking and unlocking needs to be struck. A couple of TLBs
8223 * living in PGM, but with publicly accessible inlined access methods
8224 * could perhaps be an even better solution. */
8225 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8226 GCPhysMem,
8227 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8228 pVCpu->iem.s.fBypassHandlers,
8229 ppvMem,
8230 pLock);
8231 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8232 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8233
8234#ifdef IEM_VERIFICATION_MODE_FULL
8235 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8236 pVCpu->iem.s.fProblematicMemory = true;
8237#endif
8238 return rc;
8239}
8240
8241
8242/**
8243 * Unmap a page previously mapped by iemMemPageMap.
8244 *
8245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8246 * @param GCPhysMem The physical address.
8247 * @param fAccess The intended access.
8248 * @param pvMem What iemMemPageMap returned.
8249 * @param pLock The PGM lock.
8250 */
8251DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8252{
8253 NOREF(pVCpu);
8254 NOREF(GCPhysMem);
8255 NOREF(fAccess);
8256 NOREF(pvMem);
8257 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8258}
8259
8260
8261/**
8262 * Looks up a memory mapping entry.
8263 *
8264 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8266 * @param pvMem The memory address.
8267 * @param fAccess The access to.
8268 */
8269DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8270{
8271 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8272 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8273 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8274 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8275 return 0;
8276 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8277 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8278 return 1;
8279 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8280 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8281 return 2;
8282 return VERR_NOT_FOUND;
8283}
8284
8285
8286/**
8287 * Finds a free memmap entry when using iNextMapping doesn't work.
8288 *
8289 * @returns Memory mapping index, 1024 on failure.
8290 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8291 */
8292IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8293{
8294 /*
8295 * The easy case.
8296 */
8297 if (pVCpu->iem.s.cActiveMappings == 0)
8298 {
8299 pVCpu->iem.s.iNextMapping = 1;
8300 return 0;
8301 }
8302
8303 /* There should be enough mappings for all instructions. */
8304 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8305
8306 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8307 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8308 return i;
8309
8310 AssertFailedReturn(1024);
8311}
8312
8313
8314/**
8315 * Commits a bounce buffer that needs writing back and unmaps it.
8316 *
8317 * @returns Strict VBox status code.
8318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8319 * @param iMemMap The index of the buffer to commit.
8320 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8321 * Always false in ring-3, obviously.
8322 */
8323IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8324{
8325 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8326 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8327#ifdef IN_RING3
8328 Assert(!fPostponeFail);
8329 RT_NOREF_PV(fPostponeFail);
8330#endif
8331
8332 /*
8333 * Do the writing.
8334 */
8335#ifndef IEM_VERIFICATION_MODE_MINIMAL
8336 PVM pVM = pVCpu->CTX_SUFF(pVM);
8337 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8338 && !IEM_VERIFICATION_ENABLED(pVCpu))
8339 {
8340 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8341 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8342 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8343 if (!pVCpu->iem.s.fBypassHandlers)
8344 {
8345 /*
8346 * Carefully and efficiently dealing with access handler return
8347             * codes makes this a little bloated.
8348 */
8349 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8350 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8351 pbBuf,
8352 cbFirst,
8353 PGMACCESSORIGIN_IEM);
8354 if (rcStrict == VINF_SUCCESS)
8355 {
8356 if (cbSecond)
8357 {
8358 rcStrict = PGMPhysWrite(pVM,
8359 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8360 pbBuf + cbFirst,
8361 cbSecond,
8362 PGMACCESSORIGIN_IEM);
8363 if (rcStrict == VINF_SUCCESS)
8364 { /* nothing */ }
8365 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8366 {
8367 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8368 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8369 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8370 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8371 }
8372# ifndef IN_RING3
8373 else if (fPostponeFail)
8374 {
8375 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8376 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8377 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8378 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8379 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8380 return iemSetPassUpStatus(pVCpu, rcStrict);
8381 }
8382# endif
8383 else
8384 {
8385 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8386 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8387 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8388 return rcStrict;
8389 }
8390 }
8391 }
8392 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8393 {
8394 if (!cbSecond)
8395 {
8396 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8397 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8398 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8399 }
8400 else
8401 {
8402 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8403 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8404 pbBuf + cbFirst,
8405 cbSecond,
8406 PGMACCESSORIGIN_IEM);
8407 if (rcStrict2 == VINF_SUCCESS)
8408 {
8409 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8410 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8411 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8412 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8413 }
8414 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8415 {
8416 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8417 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8418 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8419 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8420 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8421 }
8422# ifndef IN_RING3
8423 else if (fPostponeFail)
8424 {
8425 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8426 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8427 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8428 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8429 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8430 return iemSetPassUpStatus(pVCpu, rcStrict);
8431 }
8432# endif
8433 else
8434 {
8435 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8436 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8437 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8438 return rcStrict2;
8439 }
8440 }
8441 }
8442# ifndef IN_RING3
8443 else if (fPostponeFail)
8444 {
8445 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8446 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8447 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8448 if (!cbSecond)
8449 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8450 else
8451 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8452 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8453 return iemSetPassUpStatus(pVCpu, rcStrict);
8454 }
8455# endif
8456 else
8457 {
8458 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8459 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8460 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8461 return rcStrict;
8462 }
8463 }
8464 else
8465 {
8466 /*
8467 * No access handlers, much simpler.
8468 */
8469 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8470 if (RT_SUCCESS(rc))
8471 {
8472 if (cbSecond)
8473 {
8474 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8475 if (RT_SUCCESS(rc))
8476 { /* likely */ }
8477 else
8478 {
8479 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8480 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8481 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8482 return rc;
8483 }
8484 }
8485 }
8486 else
8487 {
8488 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8489 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8490 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8491 return rc;
8492 }
8493 }
8494 }
8495#endif
8496
8497#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8498 /*
8499 * Record the write(s).
8500 */
8501 if (!pVCpu->iem.s.fNoRem)
8502 {
8503 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8504 if (pEvtRec)
8505 {
8506 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8507 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8508 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8509 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8510 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8511 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8512 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8513 }
8514 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8515 {
8516 pEvtRec = iemVerifyAllocRecord(pVCpu);
8517 if (pEvtRec)
8518 {
8519 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8520 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8521 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8522 memcpy(pEvtRec->u.RamWrite.ab,
8523 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8524 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8525 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8526 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8527 }
8528 }
8529 }
8530#endif
8531#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8532 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8533 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8534 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8535 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8536 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8537 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8538
8539 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8540 g_cbIemWrote = cbWrote;
8541 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8542#endif
8543
8544 /*
8545 * Free the mapping entry.
8546 */
8547 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8548 Assert(pVCpu->iem.s.cActiveMappings != 0);
8549 pVCpu->iem.s.cActiveMappings--;
8550 return VINF_SUCCESS;
8551}
8552
8553
8554/**
8555 * iemMemMap worker that deals with a request crossing pages.
8556 */
8557IEM_STATIC VBOXSTRICTRC
8558iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8559{
8560 /*
8561 * Do the address translations.
8562 */
8563 RTGCPHYS GCPhysFirst;
8564 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8565 if (rcStrict != VINF_SUCCESS)
8566 return rcStrict;
8567
8568 RTGCPHYS GCPhysSecond;
8569 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8570 fAccess, &GCPhysSecond);
8571 if (rcStrict != VINF_SUCCESS)
8572 return rcStrict;
8573 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8574
8575 PVM pVM = pVCpu->CTX_SUFF(pVM);
8576#ifdef IEM_VERIFICATION_MODE_FULL
8577 /*
8578 * Detect problematic memory when verifying so we can select
8579 * the right execution engine. (TLB: Redo this.)
8580 */
8581 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8582 {
8583 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8584 if (RT_SUCCESS(rc2))
8585 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8586 if (RT_FAILURE(rc2))
8587 pVCpu->iem.s.fProblematicMemory = true;
8588 }
8589#endif
8590
8591
8592 /*
8593 * Read in the current memory content if it's a read, execute or partial
8594 * write access.
8595 */
8596 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8597 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8598 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8599
8600 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8601 {
8602 if (!pVCpu->iem.s.fBypassHandlers)
8603 {
8604 /*
8605 * Must carefully deal with access handler status codes here,
8606             * which makes the code a bit bloated.
8607 */
8608 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8609 if (rcStrict == VINF_SUCCESS)
8610 {
8611 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8612 if (rcStrict == VINF_SUCCESS)
8613 { /*likely */ }
8614 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8615 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8616 else
8617 {
8618 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8619 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8620 return rcStrict;
8621 }
8622 }
8623 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8624 {
8625 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8626 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8627 {
8628 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8629 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8630 }
8631 else
8632 {
8633 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8634                         GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8635 return rcStrict2;
8636 }
8637 }
8638 else
8639 {
8640 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8641 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8642 return rcStrict;
8643 }
8644 }
8645 else
8646 {
8647 /*
8648             * No informational status codes here, much more straightforward.
8649 */
8650 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8651 if (RT_SUCCESS(rc))
8652 {
8653 Assert(rc == VINF_SUCCESS);
8654 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8655 if (RT_SUCCESS(rc))
8656 Assert(rc == VINF_SUCCESS);
8657 else
8658 {
8659 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8660 return rc;
8661 }
8662 }
8663 else
8664 {
8665 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8666 return rc;
8667 }
8668 }
8669
8670#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8671 if ( !pVCpu->iem.s.fNoRem
8672 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8673 {
8674 /*
8675 * Record the reads.
8676 */
8677 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8678 if (pEvtRec)
8679 {
8680 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8681 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8682 pEvtRec->u.RamRead.cb = cbFirstPage;
8683 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8684 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8685 }
8686 pEvtRec = iemVerifyAllocRecord(pVCpu);
8687 if (pEvtRec)
8688 {
8689 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8690 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8691 pEvtRec->u.RamRead.cb = cbSecondPage;
8692 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8693 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8694 }
8695 }
8696#endif
8697 }
8698#ifdef VBOX_STRICT
8699 else
8700 memset(pbBuf, 0xcc, cbMem);
8701 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8702 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8703#endif
8704
8705 /*
8706 * Commit the bounce buffer entry.
8707 */
8708 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8709 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8710 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8711 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8712 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8713 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8714 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8715 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8716 pVCpu->iem.s.cActiveMappings++;
8717
8718 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8719 *ppvMem = pbBuf;
8720 return VINF_SUCCESS;
8721}
8722
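/*
 * Worked example (illustrative): a 4-byte access whose page offset is 0xffe is
 * split by the function above into cbFirstPage = 2 bytes on the first page and
 * cbSecondPage = 2 bytes on the following page, both served via the bounce
 * buffer.
 */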
8723
8724/**
8725 * iemMemMap worker that deals with iemMemPageMap failures.
8726 */
8727IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8728 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8729{
8730 /*
8731 * Filter out conditions we can handle and the ones which shouldn't happen.
8732 */
8733 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8734 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8735 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8736 {
8737 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8738 return rcMap;
8739 }
8740 pVCpu->iem.s.cPotentialExits++;
8741
8742 /*
8743 * Read in the current memory content if it's a read, execute or partial
8744 * write access.
8745 */
8746 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8747 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8748 {
8749 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8750 memset(pbBuf, 0xff, cbMem);
8751 else
8752 {
8753 int rc;
8754 if (!pVCpu->iem.s.fBypassHandlers)
8755 {
8756 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8757 if (rcStrict == VINF_SUCCESS)
8758 { /* nothing */ }
8759 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8760 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8761 else
8762 {
8763 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8764 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8765 return rcStrict;
8766 }
8767 }
8768 else
8769 {
8770 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8771 if (RT_SUCCESS(rc))
8772 { /* likely */ }
8773 else
8774 {
8775 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8776 GCPhysFirst, rc));
8777 return rc;
8778 }
8779 }
8780 }
8781
8782#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8783 if ( !pVCpu->iem.s.fNoRem
8784 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8785 {
8786 /*
8787 * Record the read.
8788 */
8789 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8790 if (pEvtRec)
8791 {
8792 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8793 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8794 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8795 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8796 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8797 }
8798 }
8799#endif
8800 }
8801#ifdef VBOX_STRICT
8802 else
8803 memset(pbBuf, 0xcc, cbMem);
8804#endif
8805#ifdef VBOX_STRICT
8806 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8807 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8808#endif
8809
8810 /*
8811 * Commit the bounce buffer entry.
8812 */
8813 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8814 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8815 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8816 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8817 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8818 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8819 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8820 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8821 pVCpu->iem.s.cActiveMappings++;
8822
8823 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8824 *ppvMem = pbBuf;
8825 return VINF_SUCCESS;
8826}
8827
8828
8829
8830/**
8831 * Maps the specified guest memory for the given kind of access.
8832 *
8833 * This may use bounce buffering of the memory if the access crosses a page
8834 * boundary or if there is an access handler installed for any of it. Because
8835 * of lock prefix guarantees, we're in for some extra clutter when this
8836 * happens.
8837 *
8838 * This may raise a \#GP, \#SS, \#PF or \#AC.
8839 *
8840 * @returns VBox strict status code.
8841 *
8842 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8843 * @param ppvMem Where to return the pointer to the mapped
8844 * memory.
8845 * @param cbMem The number of bytes to map. This is usually 1,
8846 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8847 * string operations it can be up to a page.
8848 * @param iSegReg The index of the segment register to use for
8849 * this access. The base and limits are checked.
8850 * Use UINT8_MAX to indicate that no segmentation
8851 * is required (for IDT, GDT and LDT accesses).
8852 * @param GCPtrMem The address of the guest memory.
8853 * @param fAccess How the memory is being accessed. The
8854 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8855 * how to map the memory, while the
8856 * IEM_ACCESS_WHAT_XXX bit is used when raising
8857 * exceptions.
8858 */
8859IEM_STATIC VBOXSTRICTRC
8860iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8861{
8862 /*
8863 * Check the input and figure out which mapping entry to use.
8864 */
8865 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8866 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8867 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8868
8869 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8870 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8871 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8872 {
8873 iMemMap = iemMemMapFindFree(pVCpu);
8874 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8875 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8876 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8877 pVCpu->iem.s.aMemMappings[2].fAccess),
8878 VERR_IEM_IPE_9);
8879 }
8880
8881 /*
8882 * Map the memory, checking that we can actually access it. If something
8883 * slightly complicated happens, fall back on bounce buffering.
8884 */
8885 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8886 if (rcStrict != VINF_SUCCESS)
8887 return rcStrict;
8888
8889 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8890 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8891
8892 RTGCPHYS GCPhysFirst;
8893 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8894 if (rcStrict != VINF_SUCCESS)
8895 return rcStrict;
8896
8897 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8898 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8899 if (fAccess & IEM_ACCESS_TYPE_READ)
8900 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8901
8902 void *pvMem;
8903 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8904 if (rcStrict != VINF_SUCCESS)
8905 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8906
8907 /*
8908 * Fill in the mapping table entry.
8909 */
8910 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8911 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8912 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8913 pVCpu->iem.s.cActiveMappings++;
8914
8915 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8916 *ppvMem = pvMem;
8917 return VINF_SUCCESS;
8918}
8919
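/*
 * Illustrative usage sketch (hypothetical local variables); the data fetch
 * helpers further down follow this pattern (with IEM_ACCESS_DATA_R):
 *
 *     uint32_t     *pu32Dst;
 *     VBOXSTRICTRC  rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                        X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu32Dst = u32Value;
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *     }
 */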
8920
8921/**
8922 * Commits the guest memory if bounce buffered and unmaps it.
8923 *
8924 * @returns Strict VBox status code.
8925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8926 * @param pvMem The mapping.
8927 * @param fAccess The kind of access.
8928 */
8929IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8930{
8931 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8932 AssertReturn(iMemMap >= 0, iMemMap);
8933
8934 /* If it's bounce buffered, we may need to write back the buffer. */
8935 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8936 {
8937 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8938 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8939 }
8940 /* Otherwise unlock it. */
8941 else
8942 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8943
8944 /* Free the entry. */
8945 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8946 Assert(pVCpu->iem.s.cActiveMappings != 0);
8947 pVCpu->iem.s.cActiveMappings--;
8948 return VINF_SUCCESS;
8949}
8950
8951#ifdef IEM_WITH_SETJMP
8952
8953/**
8954 * Maps the specified guest memory for the given kind of access, longjmp on
8955 * error.
8956 *
8957 * This may use bounce buffering of the memory if the access crosses a page
8958 * boundary or if there is an access handler installed for any of it. Because
8959 * of lock prefix guarantees, we're in for some extra clutter when this
8960 * happens.
8961 *
8962 * This may raise a \#GP, \#SS, \#PF or \#AC.
8963 *
8964 * @returns Pointer to the mapped memory.
8965 *
8966 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8967 * @param cbMem The number of bytes to map. This is usually 1,
8968 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8969 * string operations it can be up to a page.
8970 * @param iSegReg The index of the segment register to use for
8971 * this access. The base and limits are checked.
8972 * Use UINT8_MAX to indicate that no segmentation
8973 * is required (for IDT, GDT and LDT accesses).
8974 * @param GCPtrMem The address of the guest memory.
8975 * @param fAccess How the memory is being accessed. The
8976 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8977 * how to map the memory, while the
8978 * IEM_ACCESS_WHAT_XXX bit is used when raising
8979 * exceptions.
8980 */
8981IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8982{
8983 /*
8984 * Check the input and figure out which mapping entry to use.
8985 */
8986 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8987 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8988 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8989
8990 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8991 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8992 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8993 {
8994 iMemMap = iemMemMapFindFree(pVCpu);
8995 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8996 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8997 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8998 pVCpu->iem.s.aMemMappings[2].fAccess),
8999 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
9000 }
9001
9002 /*
9003 * Map the memory, checking that we can actually access it. If something
9004 * slightly complicated happens, fall back on bounce buffering.
9005 */
9006 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9007 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9008 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9009
9010 /* Crossing a page boundary? */
9011 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
9012 { /* No (likely). */ }
9013 else
9014 {
9015 void *pvMem;
9016 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9017 if (rcStrict == VINF_SUCCESS)
9018 return pvMem;
9019 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9020 }
9021
9022 RTGCPHYS GCPhysFirst;
9023 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9024 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9025 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9026
9027 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9028 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9029 if (fAccess & IEM_ACCESS_TYPE_READ)
9030 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9031
9032 void *pvMem;
9033 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9034 if (rcStrict == VINF_SUCCESS)
9035 { /* likely */ }
9036 else
9037 {
9038 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9039 if (rcStrict == VINF_SUCCESS)
9040 return pvMem;
9041 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9042 }
9043
9044 /*
9045 * Fill in the mapping table entry.
9046 */
9047 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9048 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9049 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9050 pVCpu->iem.s.cActiveMappings++;
9051
9052 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9053 return pvMem;
9054}
9055
9056
9057/**
9058 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9059 *
9060 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9061 * @param pvMem The mapping.
9062 * @param fAccess The kind of access.
9063 */
9064IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9065{
9066 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9067 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9068
9069 /* If it's bounce buffered, we may need to write back the buffer. */
9070 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9071 {
9072 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9073 {
9074 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9075 if (rcStrict == VINF_SUCCESS)
9076 return;
9077 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9078 }
9079 }
9080 /* Otherwise unlock it. */
9081 else
9082 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9083
9084 /* Free the entry. */
9085 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9086 Assert(pVCpu->iem.s.cActiveMappings != 0);
9087 pVCpu->iem.s.cActiveMappings--;
9088}
9089
9090#endif
9091
9092#ifndef IN_RING3
9093/**
9094 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9095 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
9096 *
9097 * Allows the instruction to be completed and retired, while the IEM user will
9098 * return to ring-3 immediately afterwards and do the postponed writes there.
9099 *
9100 * @returns VBox status code (no strict statuses). Caller must check
9101 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9103 * @param pvMem The mapping.
9104 * @param fAccess The kind of access.
9105 */
9106IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9107{
9108 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9109 AssertReturn(iMemMap >= 0, iMemMap);
9110
9111 /* If it's bounce buffered, we may need to write back the buffer. */
9112 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9113 {
9114 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9115 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9116 }
9117 /* Otherwise unlock it. */
9118 else
9119 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9120
9121 /* Free the entry. */
9122 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9123 Assert(pVCpu->iem.s.cActiveMappings != 0);
9124 pVCpu->iem.s.cActiveMappings--;
9125 return VINF_SUCCESS;
9126}
9127#endif
9128
9129
9130/**
9131 * Rolls back mappings, releasing page locks and such.
9132 *
9133 * The caller shall only call this after checking cActiveMappings.
9134 *
9136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9137 */
9138IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9139{
9140 Assert(pVCpu->iem.s.cActiveMappings > 0);
9141
9142 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9143 while (iMemMap-- > 0)
9144 {
9145 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9146 if (fAccess != IEM_ACCESS_INVALID)
9147 {
9148 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9149 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9150 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9151 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9152 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9153 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9154 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9155 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9156 pVCpu->iem.s.cActiveMappings--;
9157 }
9158 }
9159}
9160
9161
9162/**
9163 * Fetches a data byte.
9164 *
9165 * @returns Strict VBox status code.
9166 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9167 * @param pu8Dst Where to return the byte.
9168 * @param iSegReg The index of the segment register to use for
9169 * this access. The base and limits are checked.
9170 * @param GCPtrMem The address of the guest memory.
9171 */
9172IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9173{
9174 /* The lazy approach for now... */
9175 uint8_t const *pu8Src;
9176 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9177 if (rc == VINF_SUCCESS)
9178 {
9179 *pu8Dst = *pu8Src;
9180 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9181 }
9182 return rc;
9183}
9184
9185
9186#ifdef IEM_WITH_SETJMP
9187/**
9188 * Fetches a data byte, longjmp on error.
9189 *
9190 * @returns The byte.
9191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9192 * @param iSegReg The index of the segment register to use for
9193 * this access. The base and limits are checked.
9194 * @param GCPtrMem The address of the guest memory.
9195 */
9196DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9197{
9198 /* The lazy approach for now... */
9199 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9200 uint8_t const bRet = *pu8Src;
9201 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9202 return bRet;
9203}
9204#endif /* IEM_WITH_SETJMP */
9205
9206
9207/**
9208 * Fetches a data word.
9209 *
9210 * @returns Strict VBox status code.
9211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9212 * @param pu16Dst Where to return the word.
9213 * @param iSegReg The index of the segment register to use for
9214 * this access. The base and limits are checked.
9215 * @param GCPtrMem The address of the guest memory.
9216 */
9217IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9218{
9219 /* The lazy approach for now... */
9220 uint16_t const *pu16Src;
9221 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9222 if (rc == VINF_SUCCESS)
9223 {
9224 *pu16Dst = *pu16Src;
9225 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9226 }
9227 return rc;
9228}
9229
9230
9231#ifdef IEM_WITH_SETJMP
9232/**
9233 * Fetches a data word, longjmp on error.
9234 *
9235 * @returns The word
9236 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9237 * @param iSegReg The index of the segment register to use for
9238 * this access. The base and limits are checked.
9239 * @param GCPtrMem The address of the guest memory.
9240 */
9241DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9242{
9243 /* The lazy approach for now... */
9244 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9245 uint16_t const u16Ret = *pu16Src;
9246 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9247 return u16Ret;
9248}
9249#endif
9250
9251
9252/**
9253 * Fetches a data dword.
9254 *
9255 * @returns Strict VBox status code.
9256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9257 * @param pu32Dst Where to return the dword.
9258 * @param iSegReg The index of the segment register to use for
9259 * this access. The base and limits are checked.
9260 * @param GCPtrMem The address of the guest memory.
9261 */
9262IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9263{
9264 /* The lazy approach for now... */
9265 uint32_t const *pu32Src;
9266 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9267 if (rc == VINF_SUCCESS)
9268 {
9269 *pu32Dst = *pu32Src;
9270 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9271 }
9272 return rc;
9273}
9274
9275
9276#ifdef IEM_WITH_SETJMP
9277
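/**
 * Applies segmentation to a data read, longjmp on error.
 *
 * @returns The flat (linear) address to use for the access.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for this
 *                      access.  The base and limits are checked.
 * @param   cbMem       The access size.
 * @param   GCPtrMem    The segment relative address of the guest memory.
 */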
9278IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9279{
9280 Assert(cbMem >= 1);
9281 Assert(iSegReg < X86_SREG_COUNT);
9282
9283 /*
9284 * 64-bit mode is simpler.
9285 */
9286 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9287 {
9288 if (iSegReg >= X86_SREG_FS)
9289 {
9290 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9291 GCPtrMem += pSel->u64Base;
9292 }
9293
9294 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9295 return GCPtrMem;
9296 }
9297 /*
9298 * 16-bit and 32-bit segmentation.
9299 */
9300 else
9301 {
9302 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9303 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9304 == X86DESCATTR_P /* data, expand up */
9305 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9306 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9307 {
9308 /* expand up */
9309 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9310 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9311 && GCPtrLast32 > (uint32_t)GCPtrMem))
9312 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9313 }
9314 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9315 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9316 {
9317 /* expand down */
9318 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9319 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9320 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9321 && GCPtrLast32 > (uint32_t)GCPtrMem))
9322 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9323 }
9324 else
9325 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9326 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9327 }
9328 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9329}
9330
9331
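/**
 * Applies segmentation to a data write, longjmp on error.
 *
 * @returns The flat (linear) address to use for the access.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for this
 *                      access.  The base and limits are checked.
 * @param   cbMem       The access size.
 * @param   GCPtrMem    The segment relative address of the guest memory.
 */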
9332IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9333{
9334 Assert(cbMem >= 1);
9335 Assert(iSegReg < X86_SREG_COUNT);
9336
9337 /*
9338 * 64-bit mode is simpler.
9339 */
9340 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9341 {
9342 if (iSegReg >= X86_SREG_FS)
9343 {
9344 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9345 GCPtrMem += pSel->u64Base;
9346 }
9347
9348 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9349 return GCPtrMem;
9350 }
9351 /*
9352 * 16-bit and 32-bit segmentation.
9353 */
9354 else
9355 {
9356 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9357 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9358 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9359 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9360 {
9361 /* expand up */
9362 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9363 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9364 && GCPtrLast32 > (uint32_t)GCPtrMem))
9365 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9366 }
9367         else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9368 {
9369 /* expand down */
9370 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9371 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9372 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9373 && GCPtrLast32 > (uint32_t)GCPtrMem))
9374 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9375 }
9376 else
9377 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9378 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9379 }
9380 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9381}
9382
9383
9384/**
9385 * Fetches a data dword, longjmp on error, fallback/safe version.
9386 *
9387 * @returns The dword
9388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9389 * @param iSegReg The index of the segment register to use for
9390 * this access. The base and limits are checked.
9391 * @param GCPtrMem The address of the guest memory.
9392 */
9393IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9394{
9395 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9396 uint32_t const u32Ret = *pu32Src;
9397 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9398 return u32Ret;
9399}
9400
9401
9402/**
9403 * Fetches a data dword, longjmp on error.
9404 *
9405 * @returns The dword
9406 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9407 * @param iSegReg The index of the segment register to use for
9408 * this access. The base and limits are checked.
9409 * @param GCPtrMem The address of the guest memory.
9410 */
9411DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9412{
9413# ifdef IEM_WITH_DATA_TLB
9414 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9415 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9416 {
9417 /// @todo more later.
9418 }
9419
9420 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9421# else
9422 /* The lazy approach. */
9423 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9424 uint32_t const u32Ret = *pu32Src;
9425 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9426 return u32Ret;
9427# endif
9428}
9429#endif
9430
9431
9432#ifdef SOME_UNUSED_FUNCTION
9433/**
9434 * Fetches a data dword and sign extends it to a qword.
9435 *
9436 * @returns Strict VBox status code.
9437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9438 * @param pu64Dst Where to return the sign extended value.
9439 * @param iSegReg The index of the segment register to use for
9440 * this access. The base and limits are checked.
9441 * @param GCPtrMem The address of the guest memory.
9442 */
9443IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9444{
9445 /* The lazy approach for now... */
9446 int32_t const *pi32Src;
9447 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9448 if (rc == VINF_SUCCESS)
9449 {
9450 *pu64Dst = *pi32Src;
9451 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9452 }
9453#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9454 else
9455 *pu64Dst = 0;
9456#endif
9457 return rc;
9458}
9459#endif
9460
9461
9462/**
9463 * Fetches a data qword.
9464 *
9465 * @returns Strict VBox status code.
9466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9467 * @param pu64Dst Where to return the qword.
9468 * @param iSegReg The index of the segment register to use for
9469 * this access. The base and limits are checked.
9470 * @param GCPtrMem The address of the guest memory.
9471 */
9472IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9473{
9474 /* The lazy approach for now... */
9475 uint64_t const *pu64Src;
9476 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9477 if (rc == VINF_SUCCESS)
9478 {
9479 *pu64Dst = *pu64Src;
9480 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9481 }
9482 return rc;
9483}
9484
9485
9486#ifdef IEM_WITH_SETJMP
9487/**
9488 * Fetches a data qword, longjmp on error.
9489 *
9490 * @returns The qword.
9491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9492 * @param iSegReg The index of the segment register to use for
9493 * this access. The base and limits are checked.
9494 * @param GCPtrMem The address of the guest memory.
9495 */
9496DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9497{
9498 /* The lazy approach for now... */
9499 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9500 uint64_t const u64Ret = *pu64Src;
9501 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9502 return u64Ret;
9503}
9504#endif
9505
9506
9507/**
9508 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9509 *
9510 * @returns Strict VBox status code.
9511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9512 * @param pu64Dst Where to return the qword.
9513 * @param iSegReg The index of the segment register to use for
9514 * this access. The base and limits are checked.
9515 * @param GCPtrMem The address of the guest memory.
9516 */
9517IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9518{
9519 /* The lazy approach for now... */
9520 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9521 if (RT_UNLIKELY(GCPtrMem & 15))
9522 return iemRaiseGeneralProtectionFault0(pVCpu);
9523
9524 uint64_t const *pu64Src;
9525 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9526 if (rc == VINF_SUCCESS)
9527 {
9528 *pu64Dst = *pu64Src;
9529 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9530 }
9531 return rc;
9532}
9533
9534
9535#ifdef IEM_WITH_SETJMP
9536/**
9537 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9538 *
9539 * @returns The qword.
9540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9541 * @param iSegReg The index of the segment register to use for
9542 * this access. The base and limits are checked.
9543 * @param GCPtrMem The address of the guest memory.
9544 */
9545DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9546{
9547 /* The lazy approach for now... */
9548 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9549 if (RT_LIKELY(!(GCPtrMem & 15)))
9550 {
9551 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9552 uint64_t const u64Ret = *pu64Src;
9553 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9554 return u64Ret;
9555 }
9556
9557 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9558 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9559}
9560#endif
9561
9562
9563/**
9564 * Fetches a data tword.
9565 *
9566 * @returns Strict VBox status code.
9567 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9568 * @param pr80Dst Where to return the tword.
9569 * @param iSegReg The index of the segment register to use for
9570 * this access. The base and limits are checked.
9571 * @param GCPtrMem The address of the guest memory.
9572 */
9573IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9574{
9575 /* The lazy approach for now... */
9576 PCRTFLOAT80U pr80Src;
9577 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9578 if (rc == VINF_SUCCESS)
9579 {
9580 *pr80Dst = *pr80Src;
9581 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9582 }
9583 return rc;
9584}
9585
9586
9587#ifdef IEM_WITH_SETJMP
9588/**
9589 * Fetches a data tword, longjmp on error.
9590 *
9591 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9592 * @param pr80Dst Where to return the tword.
9593 * @param iSegReg The index of the segment register to use for
9594 * this access. The base and limits are checked.
9595 * @param GCPtrMem The address of the guest memory.
9596 */
9597DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9598{
9599 /* The lazy approach for now... */
9600 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9601 *pr80Dst = *pr80Src;
9602 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9603}
9604#endif
9605
9606
9607/**
9608 * Fetches a data dqword (double qword), generally SSE related.
9609 *
9610 * @returns Strict VBox status code.
9611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9612 * @param pu128Dst Where to return the dqword.
9613 * @param iSegReg The index of the segment register to use for
9614 * this access. The base and limits are checked.
9615 * @param GCPtrMem The address of the guest memory.
9616 */
9617IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9618{
9619 /* The lazy approach for now... */
9620 PCRTUINT128U pu128Src;
9621 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9622 if (rc == VINF_SUCCESS)
9623 {
9624 pu128Dst->au64[0] = pu128Src->au64[0];
9625 pu128Dst->au64[1] = pu128Src->au64[1];
9626 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9627 }
9628 return rc;
9629}
9630
9631
9632#ifdef IEM_WITH_SETJMP
9633/**
9634 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9635 *
9636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9637 * @param pu128Dst Where to return the dqword.
9638 * @param iSegReg The index of the segment register to use for
9639 * this access. The base and limits are checked.
9640 * @param GCPtrMem The address of the guest memory.
9641 */
9642IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9643{
9644 /* The lazy approach for now... */
9645 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9646 pu128Dst->au64[0] = pu128Src->au64[0];
9647 pu128Dst->au64[1] = pu128Src->au64[1];
9648 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9649}
9650#endif
9651
9652
9653/**
9654 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9655 * related.
9656 *
9657 * Raises \#GP(0) if not aligned.
9658 *
9659 * @returns Strict VBox status code.
9660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9661 * @param pu128Dst Where to return the dqword.
9662 * @param iSegReg The index of the segment register to use for
9663 * this access. The base and limits are checked.
9664 * @param GCPtrMem The address of the guest memory.
9665 */
9666IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9667{
9668 /* The lazy approach for now... */
9669 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9670 if ( (GCPtrMem & 15)
9671 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9672 return iemRaiseGeneralProtectionFault0(pVCpu);
9673
9674 PCRTUINT128U pu128Src;
9675 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9676 if (rc == VINF_SUCCESS)
9677 {
9678 pu128Dst->au64[0] = pu128Src->au64[0];
9679 pu128Dst->au64[1] = pu128Src->au64[1];
9680 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9681 }
9682 return rc;
9683}
9684
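/*
 * A minimal sketch (not compiled) of the alignment rule used by the SSE
 * aligned fetch/store helpers around here.  The helper name below is made
 * up for illustration; the rule itself mirrors the check above: \#GP(0) is
 * only raised when the address is misaligned AND MXCSR.MM (AMD's misaligned
 * SSE mode) is clear.
 */
#if 0 /* illustrative only, not compiled */
static bool iemExampleSseAlignmentOk(RTGCPTR GCPtrMem, uint32_t fMxcsr)
{
    /* Aligned accesses are always fine; misaligned ones are tolerated
       only when the guest has enabled misaligned SSE mode (MXCSR.MM). */
    return (GCPtrMem & 15) == 0
        || (fMxcsr & X86_MXCSR_MM) != 0;
}
#endif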
9685
9686#ifdef IEM_WITH_SETJMP
9687/**
9688 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9689 * related, longjmp on error.
9690 *
9691 * Raises \#GP(0) if not aligned.
9692 *
9693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9694 * @param pu128Dst Where to return the dqword.
9695 * @param iSegReg The index of the segment register to use for
9696 * this access. The base and limits are checked.
9697 * @param GCPtrMem The address of the guest memory.
9698 */
9699DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9700{
9701 /* The lazy approach for now... */
9702 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9703 if ( (GCPtrMem & 15) == 0
9704 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9705 {
9706 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9707 pu128Dst->au64[0] = pu128Src->au64[0];
9708 pu128Dst->au64[1] = pu128Src->au64[1];
9709 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9710 return;
9711 }
9712
9713 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9714 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9715}
9716#endif
9717
9718
9719/**
9720 * Fetches a data oword (octo word), generally AVX related.
9721 *
9722 * @returns Strict VBox status code.
9723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9724 * @param pu256Dst Where to return the oword.
9725 * @param iSegReg The index of the segment register to use for
9726 * this access. The base and limits are checked.
9727 * @param GCPtrMem The address of the guest memory.
9728 */
9729IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9730{
9731 /* The lazy approach for now... */
9732 PCRTUINT256U pu256Src;
9733 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9734 if (rc == VINF_SUCCESS)
9735 {
9736 pu256Dst->au64[0] = pu256Src->au64[0];
9737 pu256Dst->au64[1] = pu256Src->au64[1];
9738 pu256Dst->au64[2] = pu256Src->au64[2];
9739 pu256Dst->au64[3] = pu256Src->au64[3];
9740 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9741 }
9742 return rc;
9743}
9744
9745
9746#ifdef IEM_WITH_SETJMP
9747/**
9748 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9749 *
9750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9751 * @param pu256Dst Where to return the oword.
9752 * @param iSegReg The index of the segment register to use for
9753 * this access. The base and limits are checked.
9754 * @param GCPtrMem The address of the guest memory.
9755 */
9756IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9757{
9758 /* The lazy approach for now... */
9759 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9760 pu256Dst->au64[0] = pu256Src->au64[0];
9761 pu256Dst->au64[1] = pu256Src->au64[1];
9762 pu256Dst->au64[2] = pu256Src->au64[2];
9763 pu256Dst->au64[3] = pu256Src->au64[3];
9764 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9765}
9766#endif
9767
9768
9769/**
9770 * Fetches a data oword (octo word) at an aligned address, generally AVX
9771 * related.
9772 *
9773 * Raises \#GP(0) if not aligned.
9774 *
9775 * @returns Strict VBox status code.
9776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9777 * @param pu256Dst Where to return the oword.
9778 * @param iSegReg The index of the segment register to use for
9779 * this access. The base and limits are checked.
9780 * @param GCPtrMem The address of the guest memory.
9781 */
9782IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9783{
9784 /* The lazy approach for now... */
9785 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9786 if (GCPtrMem & 31)
9787 return iemRaiseGeneralProtectionFault0(pVCpu);
9788
9789 PCRTUINT256U pu256Src;
9790 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9791 if (rc == VINF_SUCCESS)
9792 {
9793 pu256Dst->au64[0] = pu256Src->au64[0];
9794 pu256Dst->au64[1] = pu256Src->au64[1];
9795 pu256Dst->au64[2] = pu256Src->au64[2];
9796 pu256Dst->au64[3] = pu256Src->au64[3];
9797 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9798 }
9799 return rc;
9800}
9801
9802
9803#ifdef IEM_WITH_SETJMP
9804/**
9805 * Fetches a data oword (octo word) at an aligned address, generally AVX
9806 * related, longjmp on error.
9807 *
9808 * Raises \#GP(0) if not aligned.
9809 *
9810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9811 * @param pu256Dst Where to return the oword.
9812 * @param iSegReg The index of the segment register to use for
9813 * this access. The base and limits are checked.
9814 * @param GCPtrMem The address of the guest memory.
9815 */
9816DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9817{
9818 /* The lazy approach for now... */
9819 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9820 if ((GCPtrMem & 31) == 0)
9821 {
9822 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9823 pu256Dst->au64[0] = pu256Src->au64[0];
9824 pu256Dst->au64[1] = pu256Src->au64[1];
9825 pu256Dst->au64[2] = pu256Src->au64[2];
9826 pu256Dst->au64[3] = pu256Src->au64[3];
9827 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9828 return;
9829 }
9830
9831 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9832 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9833}
9834#endif
9835
9836
9837
9838/**
9839 * Fetches a descriptor register (lgdt, lidt).
9840 *
9841 * @returns Strict VBox status code.
9842 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9843 * @param pcbLimit Where to return the limit.
9844 * @param pGCPtrBase Where to return the base.
9845 * @param iSegReg The index of the segment register to use for
9846 * this access. The base and limits are checked.
9847 * @param GCPtrMem The address of the guest memory.
9848 * @param enmOpSize The effective operand size.
9849 */
9850IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9851 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9852{
9853 /*
9854 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9855 * little special:
9856 * - The two reads are done separately.
9857 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9858 * - We suspect the 386 to actually commit the limit before the base in
9859 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9860 * don't try to emulate this eccentric behavior, because it's not well
9861 * enough understood and rather hard to trigger.
9862 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9863 */
9864 VBOXSTRICTRC rcStrict;
9865 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9866 {
9867 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9868 if (rcStrict == VINF_SUCCESS)
9869 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9870 }
9871 else
9872 {
9873 uint32_t uTmp = 0; /* (Avoids a Visual C++ 'maybe used uninitialized' warning.) */
9874 if (enmOpSize == IEMMODE_32BIT)
9875 {
9876 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9877 {
9878 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9879 if (rcStrict == VINF_SUCCESS)
9880 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9881 }
9882 else
9883 {
9884 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9885 if (rcStrict == VINF_SUCCESS)
9886 {
9887 *pcbLimit = (uint16_t)uTmp;
9888 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9889 }
9890 }
9891 if (rcStrict == VINF_SUCCESS)
9892 *pGCPtrBase = uTmp;
9893 }
9894 else
9895 {
9896 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9897 if (rcStrict == VINF_SUCCESS)
9898 {
9899 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9900 if (rcStrict == VINF_SUCCESS)
9901 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9902 }
9903 }
9904 }
9905 return rcStrict;
9906}
9907
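/*
 * A minimal sketch (not compiled) of the pseudo-descriptor image that the
 * LGDT/LIDT fetch above reads: a word-sized limit followed by the base.
 * The structure name is illustrative only.  With a 16-bit operand size only
 * 24 bits of the base are used (hence the 0x00ffffff mask above), 32-bit
 * uses four base bytes, and 64-bit mode uses all eight.
 */
#if 0 /* illustrative only, not compiled */
#pragma pack(1)
typedef struct IEMEXAMPLEXDTR
{
    uint16_t cbLimit;   /* bytes 0..1: the limit, normally read as a word
                           (except for the 486 dword-read quirk noted above). */
    uint64_t uBase;     /* bytes 2..9: the base; how much of it is used
                           depends on the operand size / CPU mode. */
} IEMEXAMPLEXDTR;
#pragma pack()
#endif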
9908
9909
9910/**
9911 * Stores a data byte.
9912 *
9913 * @returns Strict VBox status code.
9914 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9915 * @param iSegReg The index of the segment register to use for
9916 * this access. The base and limits are checked.
9917 * @param GCPtrMem The address of the guest memory.
9918 * @param u8Value The value to store.
9919 */
9920IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9921{
9922 /* The lazy approach for now... */
9923 uint8_t *pu8Dst;
9924 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9925 if (rc == VINF_SUCCESS)
9926 {
9927 *pu8Dst = u8Value;
9928 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9929 }
9930 return rc;
9931}
9932
9933
9934#ifdef IEM_WITH_SETJMP
9935/**
9936 * Stores a data byte, longjmp on error.
9937 *
9938 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9939 * @param iSegReg The index of the segment register to use for
9940 * this access. The base and limits are checked.
9941 * @param GCPtrMem The address of the guest memory.
9942 * @param u8Value The value to store.
9943 */
9944IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9945{
9946 /* The lazy approach for now... */
9947 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9948 *pu8Dst = u8Value;
9949 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9950}
9951#endif
9952
9953
9954/**
9955 * Stores a data word.
9956 *
9957 * @returns Strict VBox status code.
9958 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9959 * @param iSegReg The index of the segment register to use for
9960 * this access. The base and limits are checked.
9961 * @param GCPtrMem The address of the guest memory.
9962 * @param u16Value The value to store.
9963 */
9964IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9965{
9966 /* The lazy approach for now... */
9967 uint16_t *pu16Dst;
9968 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9969 if (rc == VINF_SUCCESS)
9970 {
9971 *pu16Dst = u16Value;
9972 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9973 }
9974 return rc;
9975}
9976
9977
9978#ifdef IEM_WITH_SETJMP
9979/**
9980 * Stores a data word, longjmp on error.
9981 *
9982 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9983 * @param iSegReg The index of the segment register to use for
9984 * this access. The base and limits are checked.
9985 * @param GCPtrMem The address of the guest memory.
9986 * @param u16Value The value to store.
9987 */
9988IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9989{
9990 /* The lazy approach for now... */
9991 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9992 *pu16Dst = u16Value;
9993 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9994}
9995#endif
9996
9997
9998/**
9999 * Stores a data dword.
10000 *
10001 * @returns Strict VBox status code.
10002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10003 * @param iSegReg The index of the segment register to use for
10004 * this access. The base and limits are checked.
10005 * @param GCPtrMem The address of the guest memory.
10006 * @param u32Value The value to store.
10007 */
10008IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10009{
10010 /* The lazy approach for now... */
10011 uint32_t *pu32Dst;
10012 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10013 if (rc == VINF_SUCCESS)
10014 {
10015 *pu32Dst = u32Value;
10016 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10017 }
10018 return rc;
10019}
10020
10021
10022#ifdef IEM_WITH_SETJMP
10023/**
10024 * Stores a data dword, longjmp on error.
10025 *
10027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10028 * @param iSegReg The index of the segment register to use for
10029 * this access. The base and limits are checked.
10030 * @param GCPtrMem The address of the guest memory.
10031 * @param u32Value The value to store.
10032 */
10033IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10034{
10035 /* The lazy approach for now... */
10036 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10037 *pu32Dst = u32Value;
10038 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10039}
10040#endif
10041
10042
10043/**
10044 * Stores a data qword.
10045 *
10046 * @returns Strict VBox status code.
10047 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10048 * @param iSegReg The index of the segment register to use for
10049 * this access. The base and limits are checked.
10050 * @param GCPtrMem The address of the guest memory.
10051 * @param u64Value The value to store.
10052 */
10053IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10054{
10055 /* The lazy approach for now... */
10056 uint64_t *pu64Dst;
10057 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10058 if (rc == VINF_SUCCESS)
10059 {
10060 *pu64Dst = u64Value;
10061 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10062 }
10063 return rc;
10064}
10065
10066
10067#ifdef IEM_WITH_SETJMP
10068/**
10069 * Stores a data qword, longjmp on error.
10070 *
10071 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10072 * @param iSegReg The index of the segment register to use for
10073 * this access. The base and limits are checked.
10074 * @param GCPtrMem The address of the guest memory.
10075 * @param u64Value The value to store.
10076 */
10077IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10078{
10079 /* The lazy approach for now... */
10080 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10081 *pu64Dst = u64Value;
10082 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10083}
10084#endif
10085
10086
10087/**
10088 * Stores a data dqword.
10089 *
10090 * @returns Strict VBox status code.
10091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10092 * @param iSegReg The index of the segment register to use for
10093 * this access. The base and limits are checked.
10094 * @param GCPtrMem The address of the guest memory.
10095 * @param u128Value The value to store.
10096 */
10097IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10098{
10099 /* The lazy approach for now... */
10100 PRTUINT128U pu128Dst;
10101 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10102 if (rc == VINF_SUCCESS)
10103 {
10104 pu128Dst->au64[0] = u128Value.au64[0];
10105 pu128Dst->au64[1] = u128Value.au64[1];
10106 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10107 }
10108 return rc;
10109}
10110
10111
10112#ifdef IEM_WITH_SETJMP
10113/**
10114 * Stores a data dqword, longjmp on error.
10115 *
10116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10117 * @param iSegReg The index of the segment register to use for
10118 * this access. The base and limits are checked.
10119 * @param GCPtrMem The address of the guest memory.
10120 * @param u128Value The value to store.
10121 */
10122IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10123{
10124 /* The lazy approach for now... */
10125 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10126 pu128Dst->au64[0] = u128Value.au64[0];
10127 pu128Dst->au64[1] = u128Value.au64[1];
10128 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10129}
10130#endif
10131
10132
10133/**
10134 * Stores a data dqword, SSE aligned.
10135 *
10136 * @returns Strict VBox status code.
10137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10138 * @param iSegReg The index of the segment register to use for
10139 * this access. The base and limits are checked.
10140 * @param GCPtrMem The address of the guest memory.
10141 * @param u128Value The value to store.
10142 */
10143IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10144{
10145 /* The lazy approach for now... */
10146 if ( (GCPtrMem & 15)
10147 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10148 return iemRaiseGeneralProtectionFault0(pVCpu);
10149
10150 PRTUINT128U pu128Dst;
10151 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10152 if (rc == VINF_SUCCESS)
10153 {
10154 pu128Dst->au64[0] = u128Value.au64[0];
10155 pu128Dst->au64[1] = u128Value.au64[1];
10156 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10157 }
10158 return rc;
10159}
10160
10161
10162#ifdef IEM_WITH_SETJMP
10163/**
10164 * Stores a data dqword, SSE aligned, longjmp on error.
10165 *
10167 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10168 * @param iSegReg The index of the segment register to use for
10169 * this access. The base and limits are checked.
10170 * @param GCPtrMem The address of the guest memory.
10171 * @param u128Value The value to store.
10172 */
10173DECL_NO_INLINE(IEM_STATIC, void)
10174iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10175{
10176 /* The lazy approach for now... */
10177 if ( (GCPtrMem & 15) == 0
10178 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10179 {
10180 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10181 pu128Dst->au64[0] = u128Value.au64[0];
10182 pu128Dst->au64[1] = u128Value.au64[1];
10183 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10184 return;
10185 }
10186
10187 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10188 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10189}
10190#endif
10191
10192
10193/**
10194 * Stores a data oword (octo word).
10195 *
10196 * @returns Strict VBox status code.
10197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10198 * @param iSegReg The index of the segment register to use for
10199 * this access. The base and limits are checked.
10200 * @param GCPtrMem The address of the guest memory.
10201 * @param pu256Value Pointer to the value to store.
10202 */
10203IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10204{
10205 /* The lazy approach for now... */
10206 PRTUINT256U pu256Dst;
10207 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10208 if (rc == VINF_SUCCESS)
10209 {
10210 pu256Dst->au64[0] = pu256Value->au64[0];
10211 pu256Dst->au64[1] = pu256Value->au64[1];
10212 pu256Dst->au64[2] = pu256Value->au64[2];
10213 pu256Dst->au64[3] = pu256Value->au64[3];
10214 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10215 }
10216 return rc;
10217}
10218
10219
10220#ifdef IEM_WITH_SETJMP
10221/**
10222 * Stores a data oword (octo word), longjmp on error.
10223 *
10224 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10225 * @param iSegReg The index of the segment register to use for
10226 * this access. The base and limits are checked.
10227 * @param GCPtrMem The address of the guest memory.
10228 * @param pu256Value Pointer to the value to store.
10229 */
10230IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10231{
10232 /* The lazy approach for now... */
10233 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10234 pu256Dst->au64[0] = pu256Value->au64[0];
10235 pu256Dst->au64[1] = pu256Value->au64[1];
10236 pu256Dst->au64[2] = pu256Value->au64[2];
10237 pu256Dst->au64[3] = pu256Value->au64[3];
10238 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10239}
10240#endif
10241
10242
10243/**
10244 * Stores a data oword (octo word), AVX aligned.
10245 *
10246 * @returns Strict VBox status code.
10247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10248 * @param iSegReg The index of the segment register to use for
10249 * this access. The base and limits are checked.
10250 * @param GCPtrMem The address of the guest memory.
10251 * @param pu256Value Pointer to the value to store.
10252 */
10253IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10254{
10255 /* The lazy approach for now... */
10256 if (GCPtrMem & 31)
10257 return iemRaiseGeneralProtectionFault0(pVCpu);
10258
10259 PRTUINT256U pu256Dst;
10260 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10261 if (rc == VINF_SUCCESS)
10262 {
10263 pu256Dst->au64[0] = pu256Value->au64[0];
10264 pu256Dst->au64[1] = pu256Value->au64[1];
10265 pu256Dst->au64[2] = pu256Value->au64[2];
10266 pu256Dst->au64[3] = pu256Value->au64[3];
10267 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10268 }
10269 return rc;
10270}
10271
10272
10273#ifdef IEM_WITH_SETJMP
10274/**
10275 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10276 *
10278 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10279 * @param iSegReg The index of the segment register to use for
10280 * this access. The base and limits are checked.
10281 * @param GCPtrMem The address of the guest memory.
10282 * @param pu256Value Pointer to the value to store.
10283 */
10284DECL_NO_INLINE(IEM_STATIC, void)
10285iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10286{
10287 /* The lazy approach for now... */
10288 if ((GCPtrMem & 31) == 0)
10289 {
10290 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10291 pu256Dst->au64[0] = pu256Value->au64[0];
10292 pu256Dst->au64[1] = pu256Value->au64[1];
10293 pu256Dst->au64[2] = pu256Value->au64[2];
10294 pu256Dst->au64[3] = pu256Value->au64[3];
10295 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10296 return;
10297 }
10298
10299 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10300 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10301}
10302#endif
10303
10304
10305/**
10306 * Stores a descriptor register (sgdt, sidt).
10307 *
10308 * @returns Strict VBox status code.
10309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10310 * @param cbLimit The limit.
10311 * @param GCPtrBase The base address.
10312 * @param iSegReg The index of the segment register to use for
10313 * this access. The base and limits are checked.
10314 * @param GCPtrMem The address of the guest memory.
10315 */
10316IEM_STATIC VBOXSTRICTRC
10317iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10318{
10319 /*
10320 * The SIDT and SGDT instructions actually store the data using two
10321 * independent writes. The instructions do not respond to opsize prefixes.
10322 */
10323 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10324 if (rcStrict == VINF_SUCCESS)
10325 {
10326 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10327 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10328 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10329 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10330 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10331 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10332 else
10333 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10334 }
10335 return rcStrict;
10336}
10337
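/*
 * A minimal sketch (not compiled) of the byte image produced by the 16-bit
 * SGDT/SIDT store path above.  The function name and the flat destination
 * buffer are illustrative only; the real code goes through
 * iemMemStoreDataU16/U32 as shown.
 */
#if 0 /* illustrative only, not compiled */
static void iemExampleStoreXdtr16(uint8_t *pbDst, uint16_t cbLimit, uint32_t uBase, bool fIs286OrOlder)
{
    /* First write: the 16-bit limit. */
    pbDst[0] = (uint8_t)cbLimit;
    pbDst[1] = (uint8_t)(cbLimit >> 8);
    /* Second write: a dword with the base; the 286-and-older path above
       forces 0xff into the top byte. */
    uint32_t const uDword = fIs286OrOlder ? (uBase | UINT32_C(0xff000000)) : uBase;
    pbDst[2] = (uint8_t)uDword;
    pbDst[3] = (uint8_t)(uDword >>  8);
    pbDst[4] = (uint8_t)(uDword >> 16);
    pbDst[5] = (uint8_t)(uDword >> 24);
}
#endif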
10338
10339/**
10340 * Pushes a word onto the stack.
10341 *
10342 * @returns Strict VBox status code.
10343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10344 * @param u16Value The value to push.
10345 */
10346IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10347{
10348 /* Decrement the stack pointer. */
10349 uint64_t uNewRsp;
10350 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10351 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10352
10353 /* Write the word the lazy way. */
10354 uint16_t *pu16Dst;
10355 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10356 if (rc == VINF_SUCCESS)
10357 {
10358 *pu16Dst = u16Value;
10359 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10360 }
10361
10362 /* Commit the new RSP value unless an access handler made trouble. */
10363 if (rc == VINF_SUCCESS)
10364 pCtx->rsp = uNewRsp;
10365
10366 return rc;
10367}
10368
10369
10370/**
10371 * Pushes a dword onto the stack.
10372 *
10373 * @returns Strict VBox status code.
10374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10375 * @param u32Value The value to push.
10376 */
10377IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10378{
10379 /* Decrement the stack pointer. */
10380 uint64_t uNewRsp;
10381 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10382 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10383
10384 /* Write the dword the lazy way. */
10385 uint32_t *pu32Dst;
10386 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10387 if (rc == VINF_SUCCESS)
10388 {
10389 *pu32Dst = u32Value;
10390 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10391 }
10392
10393 /* Commit the new RSP value unless an access handler made trouble. */
10394 if (rc == VINF_SUCCESS)
10395 pCtx->rsp = uNewRsp;
10396
10397 return rc;
10398}
10399
10400
10401/**
10402 * Pushes a dword segment register value onto the stack.
10403 *
10404 * @returns Strict VBox status code.
10405 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10406 * @param u32Value The value to push.
10407 */
10408IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10409{
10410 /* Decrement the stack pointer. */
10411 uint64_t uNewRsp;
10412 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10413 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10414
10415 VBOXSTRICTRC rc;
10416 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10417 {
10418 /* The recompiler writes a full dword. */
10419 uint32_t *pu32Dst;
10420 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10421 if (rc == VINF_SUCCESS)
10422 {
10423 *pu32Dst = u32Value;
10424 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10425 }
10426 }
10427 else
10428 {
10429 /* The Intel docs talk about zero extending the selector register
10430 value. My actual Intel CPU here might be zero extending the value,
10431 but it still only writes the lower word... */
10432 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10433 * happens when crossing a page boundary: is the high word checked
10434 * for write accessibility or not? Probably it is. What about segment limits?
10435 * It appears this behavior is also shared with trap error codes.
10436 *
10437 * Docs indicate the behavior changed maybe in the Pentium or Pentium Pro. Check
10438 * ancient hardware to see when it actually changed. */
10439 uint16_t *pu16Dst;
10440 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10441 if (rc == VINF_SUCCESS)
10442 {
10443 *pu16Dst = (uint16_t)u32Value;
10444 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10445 }
10446 }
10447
10448 /* Commit the new RSP value unless an access handler made trouble. */
10449 if (rc == VINF_SUCCESS)
10450 pCtx->rsp = uNewRsp;
10451
10452 return rc;
10453}
10454
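/*
 * A minimal sketch (not compiled) of the behaviour observed above when
 * pushing a segment register with a 32-bit operand size: a full dword slot
 * is reserved on the stack, but only the low word of it is written.  The
 * function name and the flat stack buffer are illustrative only.
 */
#if 0 /* illustrative only, not compiled */
static void iemExamplePushSRegObserved(uint8_t *pbStack, uint32_t *pEsp, uint16_t uSel)
{
    *pEsp -= 4;                                 /* the slot is still a dword */
    pbStack[*pEsp]     = (uint8_t)uSel;         /* ...but only bytes 0..1 are written */
    pbStack[*pEsp + 1] = (uint8_t)(uSel >> 8);
    /* bytes 2..3 of the slot keep whatever was on the stack before */
}
#endif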
10455
10456/**
10457 * Pushes a qword onto the stack.
10458 *
10459 * @returns Strict VBox status code.
10460 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10461 * @param u64Value The value to push.
10462 */
10463IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10464{
10465 /* Decrement the stack pointer. */
10466 uint64_t uNewRsp;
10467 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10468 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10469
10470 /* Write the qword the lazy way. */
10471 uint64_t *pu64Dst;
10472 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10473 if (rc == VINF_SUCCESS)
10474 {
10475 *pu64Dst = u64Value;
10476 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10477 }
10478
10479 /* Commit the new RSP value unless an access handler made trouble. */
10480 if (rc == VINF_SUCCESS)
10481 pCtx->rsp = uNewRsp;
10482
10483 return rc;
10484}
10485
10486
10487/**
10488 * Pops a word from the stack.
10489 *
10490 * @returns Strict VBox status code.
10491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10492 * @param pu16Value Where to store the popped value.
10493 */
10494IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10495{
10496 /* Increment the stack pointer. */
10497 uint64_t uNewRsp;
10498 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10499 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10500
10501 /* Fetch the word the lazy way. */
10502 uint16_t const *pu16Src;
10503 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10504 if (rc == VINF_SUCCESS)
10505 {
10506 *pu16Value = *pu16Src;
10507 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10508
10509 /* Commit the new RSP value. */
10510 if (rc == VINF_SUCCESS)
10511 pCtx->rsp = uNewRsp;
10512 }
10513
10514 return rc;
10515}
10516
10517
10518/**
10519 * Pops a dword from the stack.
10520 *
10521 * @returns Strict VBox status code.
10522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10523 * @param pu32Value Where to store the popped value.
10524 */
10525IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10526{
10527 /* Increment the stack pointer. */
10528 uint64_t uNewRsp;
10529 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10530 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10531
10532 /* Fetch the dword the lazy way. */
10533 uint32_t const *pu32Src;
10534 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10535 if (rc == VINF_SUCCESS)
10536 {
10537 *pu32Value = *pu32Src;
10538 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10539
10540 /* Commit the new RSP value. */
10541 if (rc == VINF_SUCCESS)
10542 pCtx->rsp = uNewRsp;
10543 }
10544
10545 return rc;
10546}
10547
10548
10549/**
10550 * Pops a qword from the stack.
10551 *
10552 * @returns Strict VBox status code.
10553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10554 * @param pu64Value Where to store the popped value.
10555 */
10556IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10557{
10558 /* Increment the stack pointer. */
10559 uint64_t uNewRsp;
10560 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10561 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10562
10563 /* Fetch the qword the lazy way. */
10564 uint64_t const *pu64Src;
10565 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10566 if (rc == VINF_SUCCESS)
10567 {
10568 *pu64Value = *pu64Src;
10569 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10570
10571 /* Commit the new RSP value. */
10572 if (rc == VINF_SUCCESS)
10573 pCtx->rsp = uNewRsp;
10574 }
10575
10576 return rc;
10577}
10578
10579
10580/**
10581 * Pushes a word onto the stack, using a temporary stack pointer.
10582 *
10583 * @returns Strict VBox status code.
10584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10585 * @param u16Value The value to push.
10586 * @param pTmpRsp Pointer to the temporary stack pointer.
10587 */
10588IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10589{
10590 /* Decrement the stack pointer. */
10591 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10592 RTUINT64U NewRsp = *pTmpRsp;
10593 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10594
10595 /* Write the word the lazy way. */
10596 uint16_t *pu16Dst;
10597 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10598 if (rc == VINF_SUCCESS)
10599 {
10600 *pu16Dst = u16Value;
10601 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10602 }
10603
10604 /* Commit the new RSP value unless an access handler made trouble. */
10605 if (rc == VINF_SUCCESS)
10606 *pTmpRsp = NewRsp;
10607
10608 return rc;
10609}
10610
10611
10612/**
10613 * Pushes a dword onto the stack, using a temporary stack pointer.
10614 *
10615 * @returns Strict VBox status code.
10616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10617 * @param u32Value The value to push.
10618 * @param pTmpRsp Pointer to the temporary stack pointer.
10619 */
10620IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10621{
10622 /* Decrement the stack pointer. */
10623 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10624 RTUINT64U NewRsp = *pTmpRsp;
10625 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10626
10627 /* Write the dword the lazy way. */
10628 uint32_t *pu32Dst;
10629 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10630 if (rc == VINF_SUCCESS)
10631 {
10632 *pu32Dst = u32Value;
10633 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10634 }
10635
10636 /* Commit the new RSP value unless an access handler made trouble. */
10637 if (rc == VINF_SUCCESS)
10638 *pTmpRsp = NewRsp;
10639
10640 return rc;
10641}
10642
10643
10644/**
10645 * Pushes a qword onto the stack, using a temporary stack pointer.
10646 *
10647 * @returns Strict VBox status code.
10648 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10649 * @param u64Value The value to push.
10650 * @param pTmpRsp Pointer to the temporary stack pointer.
10651 */
10652IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10653{
10654 /* Decrement the stack pointer. */
10655 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10656 RTUINT64U NewRsp = *pTmpRsp;
10657 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10658
10659 /* Write the qword the lazy way. */
10660 uint64_t *pu64Dst;
10661 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10662 if (rc == VINF_SUCCESS)
10663 {
10664 *pu64Dst = u64Value;
10665 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10666 }
10667
10668 /* Commit the new RSP value unless an access handler made trouble. */
10669 if (rc == VINF_SUCCESS)
10670 *pTmpRsp = NewRsp;
10671
10672 return rc;
10673}
10674
10675
10676/**
10677 * Pops a word from the stack, using a temporary stack pointer.
10678 *
10679 * @returns Strict VBox status code.
10680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10681 * @param pu16Value Where to store the popped value.
10682 * @param pTmpRsp Pointer to the temporary stack pointer.
10683 */
10684IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10685{
10686 /* Increment the stack pointer. */
10687 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10688 RTUINT64U NewRsp = *pTmpRsp;
10689 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10690
10691 /* Fetch the word the lazy way. */
10692 uint16_t const *pu16Src;
10693 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10694 if (rc == VINF_SUCCESS)
10695 {
10696 *pu16Value = *pu16Src;
10697 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10698
10699 /* Commit the new RSP value. */
10700 if (rc == VINF_SUCCESS)
10701 *pTmpRsp = NewRsp;
10702 }
10703
10704 return rc;
10705}
10706
10707
10708/**
10709 * Pops a dword from the stack, using a temporary stack pointer.
10710 *
10711 * @returns Strict VBox status code.
10712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10713 * @param pu32Value Where to store the popped value.
10714 * @param pTmpRsp Pointer to the temporary stack pointer.
10715 */
10716IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10717{
10718 /* Increment the stack pointer. */
10719 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10720 RTUINT64U NewRsp = *pTmpRsp;
10721 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10722
10723 /* Fetch the dword the lazy way. */
10724 uint32_t const *pu32Src;
10725 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10726 if (rc == VINF_SUCCESS)
10727 {
10728 *pu32Value = *pu32Src;
10729 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10730
10731 /* Commit the new RSP value. */
10732 if (rc == VINF_SUCCESS)
10733 *pTmpRsp = NewRsp;
10734 }
10735
10736 return rc;
10737}
10738
10739
10740/**
10741 * Pops a qword from the stack, using a temporary stack pointer.
10742 *
10743 * @returns Strict VBox status code.
10744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10745 * @param pu64Value Where to store the popped value.
10746 * @param pTmpRsp Pointer to the temporary stack pointer.
10747 */
10748IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10749{
10750 /* Increment the stack pointer. */
10751 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10752 RTUINT64U NewRsp = *pTmpRsp;
10753 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10754
10755 /* Fetch the qword the lazy way. */
10756 uint64_t const *pu64Src;
10757 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10758 if (rcStrict == VINF_SUCCESS)
10759 {
10760 *pu64Value = *pu64Src;
10761 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10762
10763 /* Commit the new RSP value. */
10764 if (rcStrict == VINF_SUCCESS)
10765 *pTmpRsp = NewRsp;
10766 }
10767
10768 return rcStrict;
10769}
10770
10771
10772/**
10773 * Begin a special stack push (used by interrupts, exceptions and such).
10774 *
10775 * This will raise \#SS or \#PF if appropriate.
10776 *
10777 * @returns Strict VBox status code.
10778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10779 * @param cbMem The number of bytes to push onto the stack.
10780 * @param ppvMem Where to return the pointer to the stack memory.
10781 * As with the other memory functions this could be
10782 * direct access or bounce buffered access, so
10783 * don't commit any register state until the commit call
10784 * succeeds.
10785 * @param puNewRsp Where to return the new RSP value. This must be
10786 * passed unchanged to
10787 * iemMemStackPushCommitSpecial().
10788 */
10789IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10790{
10791 Assert(cbMem < UINT8_MAX);
10792 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10793 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10794 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10795}
10796
10797
10798/**
10799 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10800 *
10801 * This will update the rSP.
10802 *
10803 * @returns Strict VBox status code.
10804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10805 * @param pvMem The pointer returned by
10806 * iemMemStackPushBeginSpecial().
10807 * @param uNewRsp The new RSP value returned by
10808 * iemMemStackPushBeginSpecial().
10809 */
10810IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10811{
10812 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10813 if (rcStrict == VINF_SUCCESS)
10814 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10815 return rcStrict;
10816}
10817
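/*
 * A minimal usage sketch (not compiled) of the special stack push API above:
 * map the frame, fill it, then commit, which unmaps and updates RSP in one
 * go.  The function name and the two-word frame layout are illustrative only.
 */
#if 0 /* illustrative only, not compiled */
static VBOXSTRICTRC iemExamplePushTwoWords(PVMCPU pVCpu, uint16_t uFirst, uint16_t uSecond)
{
    uint16_t    *pau16Frame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 4 /*cbMem*/, (void **)&pau16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pau16Frame[1] = uFirst;                     /* higher address = pushed first */
    pau16Frame[0] = uSecond;                    /* lower address  = pushed last (new stack top) */
    return iemMemStackPushCommitSpecial(pVCpu, pau16Frame, uNewRsp); /* commits RSP only on success */
}
#endif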
10818
10819/**
10820 * Begin a special stack pop (used by iret, retf and such).
10821 *
10822 * This will raise \#SS or \#PF if appropriate.
10823 *
10824 * @returns Strict VBox status code.
10825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10826 * @param cbMem The number of bytes to pop from the stack.
10827 * @param ppvMem Where to return the pointer to the stack memory.
10828 * @param puNewRsp Where to return the new RSP value. This must be
10829 * assigned to CPUMCTX::rsp manually some time
10830 * after iemMemStackPopDoneSpecial() has been
10831 * called.
10832 */
10833IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10834{
10835 Assert(cbMem < UINT8_MAX);
10836 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10837 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10838 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10839}
10840
10841
10842/**
10843 * Continue a special stack pop (used by iret and retf).
10844 *
10845 * This will raise \#SS or \#PF if appropriate.
10846 *
10847 * @returns Strict VBox status code.
10848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10849 * @param cbMem The number of bytes to pop from the stack.
10850 * @param ppvMem Where to return the pointer to the stack memory.
10851 * @param puNewRsp Where to return the new RSP value. This must be
10852 * assigned to CPUMCTX::rsp manually some time
10853 * after iemMemStackPopDoneSpecial() has been
10854 * called.
10855 */
10856IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10857{
10858 Assert(cbMem < UINT8_MAX);
10859 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10860 RTUINT64U NewRsp;
10861 NewRsp.u = *puNewRsp;
10862 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10863 *puNewRsp = NewRsp.u;
10864 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10865}
10866
10867
10868/**
10869 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10870 * iemMemStackPopContinueSpecial).
10871 *
10872 * The caller will manually commit the rSP.
10873 *
10874 * @returns Strict VBox status code.
10875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10876 * @param pvMem The pointer returned by
10877 * iemMemStackPopBeginSpecial() or
10878 * iemMemStackPopContinueSpecial().
10879 */
10880IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10881{
10882 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10883}
10884
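/*
 * A minimal usage sketch (not compiled) of the special stack pop API above:
 * map the frame, read it, unmap it with the done call, and only then commit
 * RSP manually as the function docs require.  The function name is
 * illustrative only.
 */
#if 0 /* illustrative only, not compiled */
static VBOXSTRICTRC iemExamplePopTwoWords(PVMCPU pVCpu, uint16_t *puFirst, uint16_t *puSecond)
{
    uint16_t const *pau16Frame;
    uint64_t        uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 4 /*cbMem*/, (void const **)&pau16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *puFirst  = pau16Frame[0];                  /* the value popped first sits at the lowest address */
    *puSecond = pau16Frame[1];
    rcStrict  = iemMemStackPopDoneSpecial(pVCpu, pau16Frame);
    if (rcStrict == VINF_SUCCESS)
        IEM_GET_CTX(pVCpu)->rsp = uNewRsp;      /* the caller commits RSP, as documented above */
    return rcStrict;
}
#endif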
10885
10886/**
10887 * Fetches a system table byte.
10888 *
10889 * @returns Strict VBox status code.
10890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10891 * @param pbDst Where to return the byte.
10892 * @param iSegReg The index of the segment register to use for
10893 * this access. The base and limits are checked.
10894 * @param GCPtrMem The address of the guest memory.
10895 */
10896IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10897{
10898 /* The lazy approach for now... */
10899 uint8_t const *pbSrc;
10900 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10901 if (rc == VINF_SUCCESS)
10902 {
10903 *pbDst = *pbSrc;
10904 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10905 }
10906 return rc;
10907}
10908
10909
10910/**
10911 * Fetches a system table word.
10912 *
10913 * @returns Strict VBox status code.
10914 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10915 * @param pu16Dst Where to return the word.
10916 * @param iSegReg The index of the segment register to use for
10917 * this access. The base and limits are checked.
10918 * @param GCPtrMem The address of the guest memory.
10919 */
10920IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10921{
10922 /* The lazy approach for now... */
10923 uint16_t const *pu16Src;
10924 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10925 if (rc == VINF_SUCCESS)
10926 {
10927 *pu16Dst = *pu16Src;
10928 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10929 }
10930 return rc;
10931}
10932
10933
10934/**
10935 * Fetches a system table dword.
10936 *
10937 * @returns Strict VBox status code.
10938 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10939 * @param pu32Dst Where to return the dword.
10940 * @param iSegReg The index of the segment register to use for
10941 * this access. The base and limits are checked.
10942 * @param GCPtrMem The address of the guest memory.
10943 */
10944IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10945{
10946 /* The lazy approach for now... */
10947 uint32_t const *pu32Src;
10948 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10949 if (rc == VINF_SUCCESS)
10950 {
10951 *pu32Dst = *pu32Src;
10952 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10953 }
10954 return rc;
10955}
10956
10957
10958/**
10959 * Fetches a system table qword.
10960 *
10961 * @returns Strict VBox status code.
10962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10963 * @param pu64Dst Where to return the qword.
10964 * @param iSegReg The index of the segment register to use for
10965 * this access. The base and limits are checked.
10966 * @param GCPtrMem The address of the guest memory.
10967 */
10968IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10969{
10970 /* The lazy approach for now... */
10971 uint64_t const *pu64Src;
10972 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10973 if (rc == VINF_SUCCESS)
10974 {
10975 *pu64Dst = *pu64Src;
10976 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10977 }
10978 return rc;
10979}
10980
10981
10982/**
10983 * Fetches a descriptor table entry with caller specified error code.
10984 *
10985 * @returns Strict VBox status code.
10986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10987 * @param pDesc Where to return the descriptor table entry.
10988 * @param uSel The selector which table entry to fetch.
10989 * @param uXcpt The exception to raise on table lookup error.
10990 * @param uErrorCode The error code associated with the exception.
10991 */
10992IEM_STATIC VBOXSTRICTRC
10993iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10994{
10995 AssertPtr(pDesc);
10996 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10997
10998 /** @todo did the 286 require all 8 bytes to be accessible? */
10999 /*
11000 * Get the selector table base and check bounds.
11001 */
11002 RTGCPTR GCPtrBase;
11003 if (uSel & X86_SEL_LDT)
11004 {
11005 if ( !pCtx->ldtr.Attr.n.u1Present
11006 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
11007 {
11008 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
11009 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
11010 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11011 uErrorCode, 0);
11012 }
11013
11014 Assert(pCtx->ldtr.Attr.n.u1Present);
11015 GCPtrBase = pCtx->ldtr.u64Base;
11016 }
11017 else
11018 {
11019 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
11020 {
11021 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
11022 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11023 uErrorCode, 0);
11024 }
11025 GCPtrBase = pCtx->gdtr.pGdt;
11026 }
11027
11028 /*
11029 * Read the legacy descriptor and maybe the long mode extensions if
11030 * required.
11031 */
11032 VBOXSTRICTRC rcStrict;
11033 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11034 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11035 else
11036 {
11037 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11038 if (rcStrict == VINF_SUCCESS)
11039 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11040 if (rcStrict == VINF_SUCCESS)
11041 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11042 if (rcStrict == VINF_SUCCESS)
11043 pDesc->Legacy.au16[3] = 0;
11044 else
11045 return rcStrict;
11046 }
11047
11048 if (rcStrict == VINF_SUCCESS)
11049 {
11050 if ( !IEM_IS_LONG_MODE(pVCpu)
11051 || pDesc->Legacy.Gen.u1DescType)
11052 pDesc->Long.au64[1] = 0;
11053 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
11054 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11055 else
11056 {
11057 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11058 /** @todo is this the right exception? */
11059 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11060 }
11061 }
11062 return rcStrict;
11063}
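/** @par Address calculation sketch (illustrative only)
 * The descriptor address math used above boils down to the following, assuming
 * the usual selector layout (bits 0-1 RPL, bit 2 TI, bits 3-15 index); the
 * helper name is made up:
 * @code
 *  static RTGCPTR iemExampleSelDescAddr(PCPUMCTX pCtx, uint16_t uSel)
 *  {
 *      RTGCPTR GCPtrBase = (uSel & X86_SEL_LDT) ? pCtx->ldtr.u64Base : pCtx->gdtr.pGdt;
 *      return GCPtrBase + (uSel & X86_SEL_MASK); // X86_SEL_MASK strips RPL and TI, leaving index * 8.
 *  }
 * @endcode
 */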
11064
11065
11066/**
11067 * Fetches a descriptor table entry.
11068 *
11069 * @returns Strict VBox status code.
11070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11071 * @param pDesc Where to return the descriptor table entry.
11072 * @param uSel The selector which table entry to fetch.
11073 * @param uXcpt The exception to raise on table lookup error.
11074 */
11075IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11076{
11077 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11078}
11079
11080
11081/**
11082 * Fakes a long mode stack selector for SS = 0.
11083 *
11084 * @param pDescSs Where to return the fake stack descriptor.
11085 * @param uDpl The DPL we want.
11086 */
11087IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11088{
11089 pDescSs->Long.au64[0] = 0;
11090 pDescSs->Long.au64[1] = 0;
11091 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11092 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11093 pDescSs->Long.Gen.u2Dpl = uDpl;
11094 pDescSs->Long.Gen.u1Present = 1;
11095 pDescSs->Long.Gen.u1Long = 1;
11096}
11097
11098
11099/**
11100 * Marks the selector descriptor as accessed (only non-system descriptors).
11101 *
11102 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11103 * will therefore skip the limit checks.
11104 *
11105 * @returns Strict VBox status code.
11106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11107 * @param uSel The selector.
11108 */
11109IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11110{
11111 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11112
11113 /*
11114 * Get the selector table base and calculate the entry address.
11115 */
11116 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11117 ? pCtx->ldtr.u64Base
11118 : pCtx->gdtr.pGdt;
11119 GCPtr += uSel & X86_SEL_MASK;
11120
11121 /*
11122 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11123 * ugly stuff to avoid this. This also makes sure the access is atomic and
11124 * more or less removes any question about 8-bit vs 32-bit accesses.
11125 */
11126 VBOXSTRICTRC rcStrict;
11127 uint32_t volatile *pu32;
11128 if ((GCPtr & 3) == 0)
11129 {
11130 /* The normal case, map the 32 bits containing the accessed bit (40). */
11131 GCPtr += 2 + 2;
11132 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11133 if (rcStrict != VINF_SUCCESS)
11134 return rcStrict;
11135 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11136 }
11137 else
11138 {
11139 /* The misaligned GDT/LDT case, map the whole thing. */
11140 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11141 if (rcStrict != VINF_SUCCESS)
11142 return rcStrict;
11143 switch ((uintptr_t)pu32 & 3)
11144 {
11145 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11146 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11147 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11148 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11149 }
11150 }
11151
11152 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11153}
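/** @par Bit position sketch (illustrative only)
 * The accessed flag is bit 40 of the 8-byte descriptor, i.e. bit 0 of the type
 * byte at offset 5. A minimal aligned-only sketch of the same operation (the
 * pvDesc name is made up and, unlike the general code above, this assumes the
 * mapping is 4-byte aligned):
 * @code
 *  uint32_t volatile *pu32Type = (uint32_t volatile *)((uint8_t volatile *)pvDesc + 4);
 *  ASMAtomicBitSet(pu32Type, 40 - 32);  // == bit 8 of the dword at offset 4, as above.
 * @endcode
 */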
11154
11155/** @} */
11156
11157
11158/*
11159 * Include the C/C++ implementation of instructions.
11160 */
11161#include "IEMAllCImpl.cpp.h"
11162
11163
11164
11165/** @name "Microcode" macros.
11166 *
11167 * The idea is that we should be able to use the same code to interpret
11168 * instructions as well as recompiler instructions. Thus this obfuscation.
11169 *
11170 * @{
11171 */
11172#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11173#define IEM_MC_END() }
11174#define IEM_MC_PAUSE() do {} while (0)
11175#define IEM_MC_CONTINUE() do {} while (0)
11176
11177/** Internal macro. */
11178#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11179 do \
11180 { \
11181 VBOXSTRICTRC rcStrict2 = a_Expr; \
11182 if (rcStrict2 != VINF_SUCCESS) \
11183 return rcStrict2; \
11184 } while (0)
11185
11186
11187#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11188#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11189#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11190#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11191#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11192#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11193#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11194#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11195#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11196 do { \
11197 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11198 return iemRaiseDeviceNotAvailable(pVCpu); \
11199 } while (0)
11200#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11201 do { \
11202 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11203 return iemRaiseDeviceNotAvailable(pVCpu); \
11204 } while (0)
11205#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11206 do { \
11207 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11208 return iemRaiseMathFault(pVCpu); \
11209 } while (0)
11210#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11211 do { \
11212 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11213 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11214 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11215 return iemRaiseUndefinedOpcode(pVCpu); \
11216 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11217 return iemRaiseDeviceNotAvailable(pVCpu); \
11218 } while (0)
11219#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11220 do { \
11221 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11222 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11223 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11224 return iemRaiseUndefinedOpcode(pVCpu); \
11225 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11226 return iemRaiseDeviceNotAvailable(pVCpu); \
11227 } while (0)
11228#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11229 do { \
11230 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11231 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11232 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11233 return iemRaiseUndefinedOpcode(pVCpu); \
11234 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11235 return iemRaiseDeviceNotAvailable(pVCpu); \
11236 } while (0)
11237#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11238 do { \
11239 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11240 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11241 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11242 return iemRaiseUndefinedOpcode(pVCpu); \
11243 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11244 return iemRaiseDeviceNotAvailable(pVCpu); \
11245 } while (0)
11246#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11247 do { \
11248 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11249 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11250 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11251 return iemRaiseUndefinedOpcode(pVCpu); \
11252 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11253 return iemRaiseDeviceNotAvailable(pVCpu); \
11254 } while (0)
11255#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11256 do { \
11257 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11258 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11259 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11260 return iemRaiseUndefinedOpcode(pVCpu); \
11261 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11262 return iemRaiseDeviceNotAvailable(pVCpu); \
11263 } while (0)
11264#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11265 do { \
11266 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11267 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11268 return iemRaiseUndefinedOpcode(pVCpu); \
11269 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11270 return iemRaiseDeviceNotAvailable(pVCpu); \
11271 } while (0)
11272#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11273 do { \
11274 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11275 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11276 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11277 return iemRaiseUndefinedOpcode(pVCpu); \
11278 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11279 return iemRaiseDeviceNotAvailable(pVCpu); \
11280 } while (0)
11281#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11282 do { \
11283 if (pVCpu->iem.s.uCpl != 0) \
11284 return iemRaiseGeneralProtectionFault0(pVCpu); \
11285 } while (0)
11286#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11287 do { \
11288 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11289 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11290 } while (0)
11291#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11292 do { \
11293 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11294 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11295 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_FSGSBASE)) \
11296 return iemRaiseUndefinedOpcode(pVCpu); \
11297 } while (0)
11298#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11299 do { \
11300 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11301 return iemRaiseGeneralProtectionFault0(pVCpu); \
11302 } while (0)
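/** @par Guard ordering sketch (illustrative only)
 * A hypothetical instruction body showing where these raise-checks typically
 * sit: right after IEM_MC_BEGIN and before any state is read or modified, so a
 * raised \#UD or \#NM leaves the guest untouched. The function name is made up
 * and this is not actual decoder code:
 * @code
 *  IEM_STATIC VBOXSTRICTRC iemExampleSse2Nop(PVMCPU pVCpu)
 *  {
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();  // may return with #UD or #NM
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *      return VINF_SUCCESS;
 *  }
 * @endcode
 */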
11303
11304
11305#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11306#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11307#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11308#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11309#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11310#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11311#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11312 uint32_t a_Name; \
11313 uint32_t *a_pName = &a_Name
11314#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11315 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
11316
11317#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11318#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11319
11320#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11321#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11322#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11323#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11324#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11325#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11326#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11327#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11328#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11329#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11330#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11331#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11332#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11333#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11334#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11335#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11336#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11337#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11338#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11339#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11340#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg));
11341#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg));
11342#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11343#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11344#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11345#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11346#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11347#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11348#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11349#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11350#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11351/** @note Not for IOPL or IF testing or modification. */
11352#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11353#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11354#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11355#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11356
11357#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11358#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11359#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11360#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11361#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11362#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11363#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11364#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11365#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11366#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11367#define IEM_MC_STORE_SREG_BASE_U64(a_iSeg, a_u64Value) *iemSRegBaseRefU64(pVCpu, (a_iSeg)) = (a_u64Value)
11368#define IEM_MC_STORE_SREG_BASE_U32(a_iSeg, a_u32Value) *iemSRegBaseRefU64(pVCpu, (a_iSeg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11369#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11370 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11371
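/** @par Composition sketch (illustrative only)
 * How an instruction body typically strings these accessors together; a
 * hypothetical 32-bit register-to-register move, not the actual decoder code:
 * @code
 *  IEM_STATIC VBOXSTRICTRC iemExampleMovGregGreg32(PVMCPU pVCpu, uint8_t iGRegDst, uint8_t iGRegSrc)
 *  {
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint32_t, u32Value);
 *      IEM_MC_FETCH_GREG_U32(u32Value, iGRegSrc);
 *      IEM_MC_STORE_GREG_U32(iGRegDst, u32Value);  // also zeroes bits 63:32, see above
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *      return VINF_SUCCESS;
 *  }
 * @endcode
 */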
11372
11373#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11374#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11375/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11376 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11377#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11378#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11379/** @note Not for IOPL or IF testing or modification. */
11380#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11381
11382#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11383#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11384#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11385 do { \
11386 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11387 *pu32Reg += (a_u32Value); \
11388 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11389 } while (0)
11390#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11391
11392#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11393#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11394#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11395 do { \
11396 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11397 *pu32Reg -= (a_u32Value); \
11398 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11399 } while (0)
11400#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11401#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11402
11403#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11404#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11405#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11406#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11407#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11408#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11409#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11410
11411#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11412#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11413#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11414#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11415
11416#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11417#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11418#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11419
11420#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11421#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11422#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11423
11424#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11425#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11426#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11427
11428#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11429#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11430#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11431
11432#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11433
11434#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11435
11436#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11437#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11438#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11439 do { \
11440 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11441 *pu32Reg &= (a_u32Value); \
11442 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11443 } while (0)
11444#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11445
11446#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11447#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11448#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11449 do { \
11450 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11451 *pu32Reg |= (a_u32Value); \
11452 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11453 } while (0)
11454#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
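/** @par 32-bit zero extension sketch (illustrative only)
 * The a_u32Value variants above write pu32Reg[1] = 0 because the U32 reference
 * points at the low dword of the 64-bit register, and a 32-bit GPR write must
 * zero bits 63:32 (AMD64 semantics). Conceptually:
 * @code
 *  uint32_t *pu32Reg = iemGRegRefU32(pVCpu, X86_GREG_xAX);  // points at the low dword of RAX
 *  *pu32Reg |= 0x80000000;                                  // the 32-bit operation itself
 *  pu32Reg[1] = 0;                                          // zero bits 63:32, as hardware does for 32-bit writes
 * @endcode
 */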
11455
11456
11457/** @note Not for IOPL or IF modification. */
11458#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11459/** @note Not for IOPL or IF modification. */
11460#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11461/** @note Not for IOPL or IF modification. */
11462#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11463
11464#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11465
11466/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11467#define IEM_MC_FPU_TO_MMX_MODE() do { \
11468 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11469 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11470 } while (0)
11471
11472/** Switches the FPU state from MMX mode (FTW=0xffff). */
11473#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11474 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0; \
11475 } while (0)
11476
11477#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11478 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11479#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11480 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11481#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11482 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11483 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11484 } while (0)
11485#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11486 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11487 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11488 } while (0)
11489#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11490 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11491#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11492 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11493#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11494 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
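/** @par MMX register move sketch (illustrative only)
 * A hypothetical movq mm,mm style body using the MREG accessors; note that
 * IEM_MC_STORE_MREG_U64 also sets the overlaid FPU register's exponent field
 * to 0xffff, matching what real hardware does on MMX writes. Register indexes
 * and ordering here are made up, not the actual decoder code:
 * @code
 *  IEM_MC_BEGIN(0, 1);
 *  IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
 *  IEM_MC_LOCAL(uint64_t, u64Tmp);
 *  IEM_MC_FPU_TO_MMX_MODE();
 *  IEM_MC_FETCH_MREG_U64(u64Tmp, 1 /*mm1*/);
 *  IEM_MC_STORE_MREG_U64(0 /*mm0*/, u64Tmp);
 *  IEM_MC_ADVANCE_RIP();
 *  IEM_MC_END();
 * @endcode
 */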
11495
11496#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11497 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11498 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11499 } while (0)
11500#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11501 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11502#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11503 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11504#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11505 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11506#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11507 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11508 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11509 } while (0)
11510#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11511 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11512#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11513 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11514 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11515 } while (0)
11516#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11517 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11518#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11519 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11520 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11521 } while (0)
11522#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11523 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11524#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11525 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11526#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11527 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11528#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11529 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11530#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11531 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11532 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11533 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11534 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11535 } while (0)
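/** @par SSE register copy sketch (illustrative only)
 * A hypothetical movaps xmm,xmm style register-form body; the register indexes
 * are made up and this is not the actual decoder code:
 * @code
 *  IEM_MC_BEGIN(0, 0);
 *  IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
 *  IEM_MC_COPY_XREG_U128(2 /*xmm2*/, 7 /*xmm7*/);
 *  IEM_MC_ADVANCE_RIP();
 *  IEM_MC_END();
 * @endcode
 */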
11536
11537#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11538 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11539 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11540 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11541 } while (0)
11542#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11543 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11544 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11545 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11546 } while (0)
11547#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11548 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11549 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11550 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11551 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11552 } while (0)
11553#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11554 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11555 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11556 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11557 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11558 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11559 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11560 } while (0)
11561
11562#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11563#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11564 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11565 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11566 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11567 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11568 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11569 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11570 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11571 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11572 } while (0)
11573#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11574 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11575 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11576 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11577 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11578 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11579 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11580 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11581 } while (0)
11582#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11583 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11584 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11585 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11586 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11587 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11588 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11589 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11590 } while (0)
11591#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11592 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11593 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11594 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11595 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11596 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11597 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11598 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11599 } while (0)
11600
11601#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11602 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11603#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11604 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11605#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11606 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11607#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11608 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11609 uintptr_t const iYRegTmp = (a_iYReg); \
11610 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11611 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11612 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11613 } while (0)
11614
11615#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11616 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11617 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11618 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11619 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11620 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11621 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11622 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11623 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11624 } while (0)
11625#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11626 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11627 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11628 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11629 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11630 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11631 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11632 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11633 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11634 } while (0)
11635#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11636 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11637 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11638 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11639 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11640 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11641 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11642 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11643 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11644 } while (0)
11645
11646#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11647 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11648 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11649 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11650 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11651 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11652 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11653 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11654 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11655 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11656 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11657 } while (0)
11658#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11659 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11660 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11661 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11662 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11663 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11664 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11665 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11666 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11667 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11668 } while (0)
11669#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11670 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11671 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11672 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11673 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11674 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11675 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11676 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11677 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11678 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11679 } while (0)
11680#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11681 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11682 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11683 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11684 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11685 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11686 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11687 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11688 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11689 } while (0)
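/** @par VEX zero extension sketch (illustrative only)
 * The _ZX_VLMAX stores above implement the AVX rule that a VEX-encoded write
 * zeroes the destination register above the written width. A hypothetical
 * vmovd xmm,r32 style body (AVX guard first, then the zero-extending store);
 * not the actual decoder code:
 * @code
 *  IEM_MC_BEGIN(0, 1);
 *  IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
 *  IEM_MC_LOCAL(uint32_t, u32Tmp);
 *  IEM_MC_FETCH_GREG_U32(u32Tmp, X86_GREG_xCX);
 *  IEM_MC_STORE_YREG_U32_ZX_VLMAX(0 /*ymm0*/, u32Tmp);
 *  IEM_MC_ADVANCE_RIP();
 *  IEM_MC_END();
 * @endcode
 */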
11690
11691#ifndef IEM_WITH_SETJMP
11692# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11693 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11694# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11695 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11696# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11697 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11698#else
11699# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11700 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11701# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11702 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11703# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11704 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11705#endif
11706
11707#ifndef IEM_WITH_SETJMP
11708# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11709 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11710# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11711 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11712# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11713 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11714#else
11715# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11716 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11717# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11718 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11719# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11720 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11721#endif
11722
11723#ifndef IEM_WITH_SETJMP
11724# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11725 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11726# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11727 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11728# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11729 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11730#else
11731# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11732 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11733# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11734 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11735# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11736 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11737#endif
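/** @par Memory fetch sketch (illustrative only)
 * In both build flavours the macro reads the same at the use site; without
 * IEM_WITH_SETJMP a failure is propagated via IEM_MC_RETURN_ON_FAILURE, with
 * it the fetch longjmps instead. A hypothetical load of a 32-bit memory
 * operand into a register (the effective address calculation is omitted here,
 * GCPtrEffSrc and its constant are just made-up stand-ins):
 * @code
 *  IEM_MC_BEGIN(0, 2);
 *  IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *  IEM_MC_LOCAL(uint32_t, u32Tmp);
 *  IEM_MC_ASSIGN(GCPtrEffSrc, 0x1000);                      // stand-in for the real effective address
 *  IEM_MC_FETCH_MEM_U32(u32Tmp, X86_SREG_DS, GCPtrEffSrc);  // may return or longjmp on #PF/#GP
 *  IEM_MC_STORE_GREG_U32(X86_GREG_xDX, u32Tmp);
 *  IEM_MC_ADVANCE_RIP();
 *  IEM_MC_END();
 * @endcode
 */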
11738
11739#ifdef SOME_UNUSED_FUNCTION
11740# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11741 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11742#endif
11743
11744#ifndef IEM_WITH_SETJMP
11745# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11746 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11747# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11748 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11749# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11750 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11751# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11752 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11753#else
11754# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11755 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11756# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11757 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11758# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11759 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11760# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11761 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11762#endif
11763
11764#ifndef IEM_WITH_SETJMP
11765# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11766 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11767# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11768 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11769# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11770 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11771#else
11772# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11773 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11774# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11775 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11776# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11777 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11778#endif
11779
11780#ifndef IEM_WITH_SETJMP
11781# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11782 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11783# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11784 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11785#else
11786# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11787 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11788# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11789 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11790#endif
11791
11792#ifndef IEM_WITH_SETJMP
11793# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11794 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11795# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11796 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11797#else
11798# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11799 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11800# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11801 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11802#endif
11803
11804
11805
11806#ifndef IEM_WITH_SETJMP
11807# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11808 do { \
11809 uint8_t u8Tmp; \
11810 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11811 (a_u16Dst) = u8Tmp; \
11812 } while (0)
11813# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11814 do { \
11815 uint8_t u8Tmp; \
11816 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11817 (a_u32Dst) = u8Tmp; \
11818 } while (0)
11819# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11820 do { \
11821 uint8_t u8Tmp; \
11822 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11823 (a_u64Dst) = u8Tmp; \
11824 } while (0)
11825# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11826 do { \
11827 uint16_t u16Tmp; \
11828 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11829 (a_u32Dst) = u16Tmp; \
11830 } while (0)
11831# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11832 do { \
11833 uint16_t u16Tmp; \
11834 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11835 (a_u64Dst) = u16Tmp; \
11836 } while (0)
11837# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11838 do { \
11839 uint32_t u32Tmp; \
11840 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11841 (a_u64Dst) = u32Tmp; \
11842 } while (0)
11843#else /* IEM_WITH_SETJMP */
11844# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11845 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11846# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11847 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11848# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11849 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11850# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11851 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11852# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11853 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11854# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11855 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11856#endif /* IEM_WITH_SETJMP */
11857
11858#ifndef IEM_WITH_SETJMP
11859# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11860 do { \
11861 uint8_t u8Tmp; \
11862 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11863 (a_u16Dst) = (int8_t)u8Tmp; \
11864 } while (0)
11865# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11866 do { \
11867 uint8_t u8Tmp; \
11868 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11869 (a_u32Dst) = (int8_t)u8Tmp; \
11870 } while (0)
11871# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11872 do { \
11873 uint8_t u8Tmp; \
11874 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11875 (a_u64Dst) = (int8_t)u8Tmp; \
11876 } while (0)
11877# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11878 do { \
11879 uint16_t u16Tmp; \
11880 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11881 (a_u32Dst) = (int16_t)u16Tmp; \
11882 } while (0)
11883# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11884 do { \
11885 uint16_t u16Tmp; \
11886 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11887 (a_u64Dst) = (int16_t)u16Tmp; \
11888 } while (0)
11889# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11890 do { \
11891 uint32_t u32Tmp; \
11892 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11893 (a_u64Dst) = (int32_t)u32Tmp; \
11894 } while (0)
11895#else /* IEM_WITH_SETJMP */
11896# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11897 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11898# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11899 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11900# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11901 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11902# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11903 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11904# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11905 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11906# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11907 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11908#endif /* IEM_WITH_SETJMP */
11909
11910#ifndef IEM_WITH_SETJMP
11911# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11912 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11913# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11914 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11915# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11916 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11917# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11918 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11919#else
11920# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11921 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11922# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11923 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11924# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11925 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11926# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11927 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11928#endif
11929
11930#ifndef IEM_WITH_SETJMP
11931# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11932 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11933# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11934 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11935# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11936 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11937# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11938 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11939#else
11940# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11941 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11942# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11943 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11944# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11945 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11946# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11947 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11948#endif
11949
11950#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11951#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11952#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11953#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11954#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11955#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11956#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11957 do { \
11958 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11959 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11960 } while (0)
11961
11962#ifndef IEM_WITH_SETJMP
11963# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11964 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11965# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11966 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11967#else
11968# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11969 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11970# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11971 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11972#endif
11973
11974#ifndef IEM_WITH_SETJMP
11975# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11976 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11977# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11978 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11979#else
11980# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11981 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11982# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11983 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11984#endif
11985
11986
11987#define IEM_MC_PUSH_U16(a_u16Value) \
11988 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11989#define IEM_MC_PUSH_U32(a_u32Value) \
11990 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11991#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11992 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11993#define IEM_MC_PUSH_U64(a_u64Value) \
11994 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11995
11996#define IEM_MC_POP_U16(a_pu16Value) \
11997 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11998#define IEM_MC_POP_U32(a_pu32Value) \
11999 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
12000#define IEM_MC_POP_U64(a_pu64Value) \
12001 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
12002
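/* Usage sketch (illustrative only): a 16-bit register push along the lines of
 * the opcode tables.  IEM_MC_BEGIN, IEM_MC_LOCAL, IEM_MC_FETCH_GREG_U16,
 * IEM_MC_ADVANCE_RIP and IEM_MC_END are assumed from the wider IEM_MC
 * vocabulary and iReg stands for the decoded register index:
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, iReg);
 *     IEM_MC_PUSH_U16(u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */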
12003/** Maps guest memory for direct or bounce buffered access.
12004 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12005 * @remarks May return.
12006 */
12007#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
12008 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12009
12010/** Maps guest memory for direct or bounce buffered access.
12011 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12012 * @remarks May return.
12013 */
12014#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
12015 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12016
12017/** Commits the memory and unmaps the guest memory.
12018 * @remarks May return.
12019 */
12020#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
12021 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
12022
12023/** Commits the memory and unmaps the guest memory, unless the FPU status word
12024 * (@a a_u16FSW) together with the FPU control word indicates a pending unmasked
12025 * exception that would prevent the store from taking place.
12026 *
12027 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12028 * store, while \#P will not.
12029 *
12030 * @remarks May in theory return - for now.
12031 */
12032#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12033 do { \
12034 if ( !(a_u16FSW & X86_FSW_ES) \
12035 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12036 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
12037 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12038 } while (0)
12039
12040/** Calculate effective address from R/M. */
12041#ifndef IEM_WITH_SETJMP
12042# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12043 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12044#else
12045# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12046 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12047#endif
12048
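/* Usage sketch (illustrative only): how the effective address, map and commit
 * macros above typically combine for a read-modify-write memory destination.
 * IEM_MC_BEGIN, IEM_MC_ARG, IEM_MC_ARG_LOCAL_EFLAGS, IEM_MC_LOCAL,
 * IEM_MC_FETCH_GREG_U16, IEM_MC_FETCH_EFLAGS, IEM_MC_COMMIT_EFLAGS,
 * IEM_MC_ADVANCE_RIP, IEM_MC_END and IEM_ACCESS_DATA_RW are assumed from the
 * wider IEM sources; pfnWorkerU16 and iReg are made-up placeholders for the
 * binary operator worker and the decoded register operand:
 *
 *     IEM_MC_BEGIN(3, 2);
 *     IEM_MC_ARG(uint16_t *,       pu16Dst,         0);
 *     IEM_MC_ARG(uint16_t,         u16Src,          1);
 *     IEM_MC_ARG_LOCAL_EFLAGS(     pEFlags, EFlags, 2);
 *     IEM_MC_LOCAL(RTGCPTR,        GCPtrEffDst);
 *
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *     IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *     IEM_MC_FETCH_GREG_U16(u16Src, iReg);
 *     IEM_MC_FETCH_EFLAGS(EFlags);
 *     IEM_MC_CALL_VOID_AIMPL_3(pfnWorkerU16, pu16Dst, u16Src, pEFlags);
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *     IEM_MC_COMMIT_EFLAGS(EFlags);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */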
12049#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12050#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12051#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12052#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12053#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12054#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12055#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12056
12057/**
12058 * Defers the rest of the instruction emulation to a C implementation routine
12059 * and returns, only taking the standard parameters.
12060 *
12061 * @param a_pfnCImpl The pointer to the C routine.
12062 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12063 */
12064#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12065
12066/**
12067 * Defers the rest of instruction emulation to a C implementation routine and
12068 * returns, taking one argument in addition to the standard ones.
12069 *
12070 * @param a_pfnCImpl The pointer to the C routine.
12071 * @param a0 The argument.
12072 */
12073#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12074
12075/**
12076 * Defers the rest of the instruction emulation to a C implementation routine
12077 * and returns, taking two arguments in addition to the standard ones.
12078 *
12079 * @param a_pfnCImpl The pointer to the C routine.
12080 * @param a0 The first extra argument.
12081 * @param a1 The second extra argument.
12082 */
12083#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12084
12085/**
12086 * Defers the rest of the instruction emulation to a C implementation routine
12087 * and returns, taking three arguments in addition to the standard ones.
12088 *
12089 * @param a_pfnCImpl The pointer to the C routine.
12090 * @param a0 The first extra argument.
12091 * @param a1 The second extra argument.
12092 * @param a2 The third extra argument.
12093 */
12094#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12095
12096/**
12097 * Defers the rest of the instruction emulation to a C implementation routine
12098 * and returns, taking four arguments in addition to the standard ones.
12099 *
12100 * @param a_pfnCImpl The pointer to the C routine.
12101 * @param a0 The first extra argument.
12102 * @param a1 The second extra argument.
12103 * @param a2 The third extra argument.
12104 * @param a3 The fourth extra argument.
12105 */
12106#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12107
12108/**
12109 * Defers the rest of the instruction emulation to a C implementation routine
12110 * and returns, taking five arguments in addition to the standard ones.
12111 *
12112 * @param a_pfnCImpl The pointer to the C routine.
12113 * @param a0 The first extra argument.
12114 * @param a1 The second extra argument.
12115 * @param a2 The third extra argument.
12116 * @param a3 The fourth extra argument.
12117 * @param a4 The fifth extra argument.
12118 */
12119#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12120
12121/**
12122 * Defers the entire instruction emulation to a C implementation routine and
12123 * returns, only taking the standard parameters.
12124 *
12125 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12126 *
12127 * @param a_pfnCImpl The pointer to the C routine.
12128 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12129 */
12130#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12131
12132/**
12133 * Defers the entire instruction emulation to a C implementation routine and
12134 * returns, taking one argument in addition to the standard ones.
12135 *
12136 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12137 *
12138 * @param a_pfnCImpl The pointer to the C routine.
12139 * @param a0 The argument.
12140 */
12141#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12142
12143/**
12144 * Defers the entire instruction emulation to a C implementation routine and
12145 * returns, taking two arguments in addition to the standard ones.
12146 *
12147 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12148 *
12149 * @param a_pfnCImpl The pointer to the C routine.
12150 * @param a0 The first extra argument.
12151 * @param a1 The second extra argument.
12152 */
12153#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12154
12155/**
12156 * Defers the entire instruction emulation to a C implementation routine and
12157 * returns, taking three arguments in addition to the standard ones.
12158 *
12159 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12160 *
12161 * @param a_pfnCImpl The pointer to the C routine.
12162 * @param a0 The first extra argument.
12163 * @param a1 The second extra argument.
12164 * @param a2 The third extra argument.
12165 */
12166#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12167
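/* Usage sketch (illustrative only): a hypothetical decoder body that hands the
 * whole instruction over to a C implementation.  FNIEMOP_DEF and the
 * iemCImpl_example routine are assumed names, not part of this excerpt; the
 * IEMOP_MNEMONIC and IEMOP_HLP_* helpers are defined further down in the file:
 *
 *     FNIEMOP_DEF(iemOp_example)
 *     {
 *         IEMOP_MNEMONIC(example, "example");
 *         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_example);
 *     }
 */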
12168/**
12169 * Calls a FPU assembly implementation taking one visible argument.
12170 *
12171 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12172 * @param a0 The first extra argument.
12173 */
12174#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12175 do { \
12176 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
12177 } while (0)
12178
12179/**
12180 * Calls a FPU assembly implementation taking two visible arguments.
12181 *
12182 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12183 * @param a0 The first extra argument.
12184 * @param a1 The second extra argument.
12185 */
12186#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12187 do { \
12188 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12189 } while (0)
12190
12191/**
12192 * Calls a FPU assembly implementation taking three visible arguments.
12193 *
12194 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12195 * @param a0 The first extra argument.
12196 * @param a1 The second extra argument.
12197 * @param a2 The third extra argument.
12198 */
12199#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12200 do { \
12201 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12202 } while (0)
12203
12204#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12205 do { \
12206 (a_FpuData).FSW = (a_FSW); \
12207 (a_FpuData).r80Result = *(a_pr80Value); \
12208 } while (0)
12209
12210/** Pushes FPU result onto the stack. */
12211#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12212 iemFpuPushResult(pVCpu, &a_FpuData)
12213/** Pushes FPU result onto the stack and sets the FPUDP. */
12214#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12215 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12216
12217/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
12218#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12219 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12220
12221/** Stores FPU result in a stack register. */
12222#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12223 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12224/** Stores FPU result in a stack register and pops the stack. */
12225#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12226 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12227/** Stores FPU result in a stack register and sets the FPUDP. */
12228#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12229 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12230/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12231 * stack. */
12232#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12233 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12234
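/* Usage sketch (illustrative only): how the FPU call and result macros fit
 * together for an FADD ST0,STi style instruction.  IEM_MC_BEGIN, IEM_MC_LOCAL,
 * IEM_MC_ARG, IEM_MC_ARG_LOCAL_REF, IEM_MC_MAYBE_RAISE_FPU_XCPT,
 * IEM_MC_ADVANCE_RIP, IEM_MC_END and the iemAImpl_fadd_r80_by_r80 worker are
 * assumed from the wider IEM sources, iStReg stands for the decoded ST(i), and
 * the IEM_MC_IF_*, IEM_MC_ELSE and IEM_MC_ENDIF macros are defined further down:
 *
 *     IEM_MC_BEGIN(3, 1);
 *     IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *     IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *     IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,      1);
 *     IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,      2);
 *
 *     IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *     IEM_MC_PREPARE_FPU_USAGE();
 *     IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *         IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *         IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *     IEM_MC_ELSE()
 *         IEM_MC_FPU_STACK_UNDERFLOW(0);
 *     IEM_MC_ENDIF();
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */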
12235/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12236#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12237 iemFpuUpdateOpcodeAndIp(pVCpu)
12238/** Free a stack register (for FFREE and FFREEP). */
12239#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12240 iemFpuStackFree(pVCpu, a_iStReg)
12241/** Increment the FPU stack pointer. */
12242#define IEM_MC_FPU_STACK_INC_TOP() \
12243 iemFpuStackIncTop(pVCpu)
12244/** Decrement the FPU stack pointer. */
12245#define IEM_MC_FPU_STACK_DEC_TOP() \
12246 iemFpuStackDecTop(pVCpu)
12247
12248/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12249#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12250 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12251/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12252#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12253 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12254/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12255#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12256 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12257/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12258#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12259 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12260/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12261 * stack. */
12262#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12263 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12264/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12265#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12266 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12267
12268/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12269#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12270 iemFpuStackUnderflow(pVCpu, a_iStDst)
12271/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12272 * stack. */
12273#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12274 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12275/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12276 * FPUDS. */
12277#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12278 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12279/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12280 * FPUDS. Pops stack. */
12281#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12282 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12283/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12284 * stack twice. */
12285#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12286 iemFpuStackUnderflowThenPopPop(pVCpu)
12287/** Raises a FPU stack underflow exception for an instruction pushing a result
12288 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12289#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12290 iemFpuStackPushUnderflow(pVCpu)
12291/** Raises a FPU stack underflow exception for an instruction pushing a result
12292 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12293#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12294 iemFpuStackPushUnderflowTwo(pVCpu)
12295
12296/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12297 * FPUIP, FPUCS and FOP. */
12298#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12299 iemFpuStackPushOverflow(pVCpu)
12300/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12301 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12302#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12303 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12304/** Prepares for using the FPU state.
12305 * Ensures that we can use the host FPU in the current context (RC+R0).
12306 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12307#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12308/** Actualizes the guest FPU state so it can be accessed in read-only fashion. */
12309#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12310/** Actualizes the guest FPU state so it can be accessed and modified. */
12311#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12312
12313/** Prepares for using the SSE state.
12314 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12315 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12316#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12317/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12318#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12319/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12320#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12321
12322/** Prepares for using the AVX state.
12323 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12324 * Ensures the guest AVX state in the CPUMCTX is up to date.
12325 * @note This will include the AVX512 state too when support for it is added
12326 * due to the zero-extending behaviour of VEX-encoded instructions. */
12327#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12328/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12329#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12330/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12331#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12332
12333/**
12334 * Calls an MMX assembly implementation taking two visible arguments.
12335 *
12336 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12337 * @param a0 The first extra argument.
12338 * @param a1 The second extra argument.
12339 */
12340#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12341 do { \
12342 IEM_MC_PREPARE_FPU_USAGE(); \
12343 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12344 } while (0)
12345
12346/**
12347 * Calls an MMX assembly implementation taking three visible arguments.
12348 *
12349 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12350 * @param a0 The first extra argument.
12351 * @param a1 The second extra argument.
12352 * @param a2 The third extra argument.
12353 */
12354#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12355 do { \
12356 IEM_MC_PREPARE_FPU_USAGE(); \
12357 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12358 } while (0)
12359
12360
12361/**
12362 * Calls an SSE assembly implementation taking two visible arguments.
12363 *
12364 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12365 * @param a0 The first extra argument.
12366 * @param a1 The second extra argument.
12367 */
12368#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12369 do { \
12370 IEM_MC_PREPARE_SSE_USAGE(); \
12371 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12372 } while (0)
12373
12374/**
12375 * Calls an SSE assembly implementation taking three visible arguments.
12376 *
12377 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12378 * @param a0 The first extra argument.
12379 * @param a1 The second extra argument.
12380 * @param a2 The third extra argument.
12381 */
12382#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12383 do { \
12384 IEM_MC_PREPARE_SSE_USAGE(); \
12385 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12386 } while (0)
12387
12388
12389/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12390 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12391#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12392 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState), 0)
12393
12394/**
12395 * Calls an AVX assembly implementation taking two visible arguments.
12396 *
12397 * There is one implicit zeroth argument, a pointer to the extended state.
12398 *
12399 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12400 * @param a1 The first extra argument.
12401 * @param a2 The second extra argument.
12402 */
12403#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12404 do { \
12405 IEM_MC_PREPARE_AVX_USAGE(); \
12406 a_pfnAImpl(pXState, (a1), (a2)); \
12407 } while (0)
12408
12409/**
12410 * Calls an AVX assembly implementation taking three visible arguments.
12411 *
12412 * There is one implicit zeroth argument, a pointer to the extended state.
12413 *
12414 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12415 * @param a1 The first extra argument.
12416 * @param a2 The second extra argument.
12417 * @param a3 The third extra argument.
12418 */
12419#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12420 do { \
12421 IEM_MC_PREPARE_AVX_USAGE(); \
12422 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12423 } while (0)
12424
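/* Usage sketch (illustrative only): the implicit-state argument plus an AVX
 * worker call.  IEM_MC_BEGIN, IEM_MC_ARG_CONST, IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT,
 * IEM_MC_ADVANCE_RIP and IEM_MC_END are assumed from the wider IEM_MC set, and
 * iemAImpl_myAvxWorker, bRegDst and bRegSrc are made-up placeholders.  Note that
 * IEM_MC_CALL_AVX_AIMPL_2 already invokes IEM_MC_PREPARE_AVX_USAGE, so no
 * separate prepare call is needed:
 *
 *     IEM_MC_BEGIN(3, 0);
 *     IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *     IEM_MC_ARG_CONST(uint8_t, iYRegDst, bRegDst, 1);
 *     IEM_MC_ARG_CONST(uint8_t, iYRegSrc, bRegSrc, 2);
 *     IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
 *     IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_myAvxWorker, iYRegDst, iYRegSrc);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */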
12425/** @note Not for IOPL or IF testing. */
12426#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
12427/** @note Not for IOPL or IF testing. */
12428#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
12429/** @note Not for IOPL or IF testing. */
12430#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
12431/** @note Not for IOPL or IF testing. */
12432#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
12433/** @note Not for IOPL or IF testing. */
12434#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12435 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12436 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12437/** @note Not for IOPL or IF testing. */
12438#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12439 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12440 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12441/** @note Not for IOPL or IF testing. */
12442#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12443 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12444 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12445 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12446/** @note Not for IOPL or IF testing. */
12447#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12448 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12449 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12450 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12451#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
12452#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
12453#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
12454/** @note Not for IOPL or IF testing. */
12455#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12456 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12457 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12458/** @note Not for IOPL or IF testing. */
12459#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12460 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12461 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12462/** @note Not for IOPL or IF testing. */
12463#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12464 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12465 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12466/** @note Not for IOPL or IF testing. */
12467#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12468 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12469 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12470/** @note Not for IOPL or IF testing. */
12471#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12472 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12473 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12474/** @note Not for IOPL or IF testing. */
12475#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12476 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12477 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12478#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12479#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12480
12481#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12482 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12483#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12484 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12485#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12486 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12487#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12488 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12489#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12490 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12491#define IEM_MC_IF_FCW_IM() \
12492 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12493
12494#define IEM_MC_ELSE() } else {
12495#define IEM_MC_ENDIF() } do {} while (0)
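/* Usage sketch (illustrative only): the IEM_MC_IF_* macros above open an
 * ordinary C block, so every use must be closed with IEM_MC_ENDIF(), optionally
 * with IEM_MC_ELSE() in between.  A conditional-jump style fragment, with
 * IEM_MC_BEGIN, IEM_MC_REL_JMP_S8, IEM_MC_ADVANCE_RIP and IEM_MC_END assumed
 * from the wider IEM_MC set:
 *
 *     IEM_MC_BEGIN(0, 0);
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     IEM_MC_ELSE()
 *         IEM_MC_ADVANCE_RIP();
 *     IEM_MC_ENDIF();
 *     IEM_MC_END();
 */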
12496
12497/** @} */
12498
12499
12500/** @name Opcode Debug Helpers.
12501 * @{
12502 */
12503#ifdef VBOX_WITH_STATISTICS
12504# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12505#else
12506# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12507#endif
12508
12509#ifdef DEBUG
12510# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12511 do { \
12512 IEMOP_INC_STATS(a_Stats); \
12513 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
12514 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12515 } while (0)
12516
12517# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12518 do { \
12519 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12520 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12521 (void)RT_CONCAT(OP_,a_Upper); \
12522 (void)(a_fDisHints); \
12523 (void)(a_fIemHints); \
12524 } while (0)
12525
12526# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12527 do { \
12528 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12529 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12530 (void)RT_CONCAT(OP_,a_Upper); \
12531 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12532 (void)(a_fDisHints); \
12533 (void)(a_fIemHints); \
12534 } while (0)
12535
12536# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12537 do { \
12538 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12539 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12540 (void)RT_CONCAT(OP_,a_Upper); \
12541 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12542 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12543 (void)(a_fDisHints); \
12544 (void)(a_fIemHints); \
12545 } while (0)
12546
12547# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12548 do { \
12549 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12550 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12551 (void)RT_CONCAT(OP_,a_Upper); \
12552 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12553 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12554 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12555 (void)(a_fDisHints); \
12556 (void)(a_fIemHints); \
12557 } while (0)
12558
12559# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12560 do { \
12561 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12562 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12563 (void)RT_CONCAT(OP_,a_Upper); \
12564 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12565 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12566 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12567 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12568 (void)(a_fDisHints); \
12569 (void)(a_fIemHints); \
12570 } while (0)
12571
12572#else
12573# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12574
12575# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12576 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12577# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12578 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12579# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12580 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12581# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12582 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12583# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12584 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12585
12586#endif
12587
12588#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12589 IEMOP_MNEMONIC0EX(a_Lower, \
12590 #a_Lower, \
12591 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12592#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12593 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12594 #a_Lower " " #a_Op1, \
12595 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12596#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12597 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12598 #a_Lower " " #a_Op1 "," #a_Op2, \
12599 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12600#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12601 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12602 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12603 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12604#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12605 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12606 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12607 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
12608
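/* Usage sketch (illustrative only): at the top of an opcode decoder one would
 * write something along the lines of
 *
 *     IEMOP_MNEMONIC2(RM, ADD, add, Ev, Gv, DISOPTYPE_HARMLESS, 0);
 *
 * which, going by the expansions above, bumps the add_Ev_Gv statistics counter,
 * logs the "add Ev,Gv" mnemonic in debug builds, and compile-time checks the
 * IEMOPFORM_RM, OP_ADD, OP_PARM_Ev and OP_PARM_Gv tokens.  The RM/Ev/Gv tokens
 * and the DISOPTYPE_HARMLESS hint are assumptions based on the naming scheme.
 */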
12609/** @} */
12610
12611
12612/** @name Opcode Helpers.
12613 * @{
12614 */
12615
12616#ifdef IN_RING3
12617# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12618 do { \
12619 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12620 else \
12621 { \
12622 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12623 return IEMOP_RAISE_INVALID_OPCODE(); \
12624 } \
12625 } while (0)
12626#else
12627# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12628 do { \
12629 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12630 else return IEMOP_RAISE_INVALID_OPCODE(); \
12631 } while (0)
12632#endif
12633
12634/** The instruction requires a 186 or later. */
12635#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12636# define IEMOP_HLP_MIN_186() do { } while (0)
12637#else
12638# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12639#endif
12640
12641/** The instruction requires a 286 or later. */
12642#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12643# define IEMOP_HLP_MIN_286() do { } while (0)
12644#else
12645# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12646#endif
12647
12648/** The instruction requires a 386 or later. */
12649#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12650# define IEMOP_HLP_MIN_386() do { } while (0)
12651#else
12652# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12653#endif
12654
12655/** The instruction requires a 386 or later if the given expression is true. */
12656#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12657# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12658#else
12659# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12660#endif
12661
12662/** The instruction requires a 486 or later. */
12663#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12664# define IEMOP_HLP_MIN_486() do { } while (0)
12665#else
12666# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12667#endif
12668
12669/** The instruction requires a Pentium (586) or later. */
12670#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12671# define IEMOP_HLP_MIN_586() do { } while (0)
12672#else
12673# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12674#endif
12675
12676/** The instruction requires a PentiumPro (686) or later. */
12677#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12678# define IEMOP_HLP_MIN_686() do { } while (0)
12679#else
12680# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12681#endif
12682
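/* Usage sketch (illustrative only): a decoder for a 186+ instruction (PUSHA,
 * BOUND and friends) would start with
 *
 *     IEMOP_HLP_MIN_186();
 *
 * which, on older configured target CPUs, raises \#UD instead of decoding the
 * instruction any further.
 */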
12683
12684/** The instruction raises an \#UD in real and V8086 mode. */
12685#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12686 do \
12687 { \
12688 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12689 else return IEMOP_RAISE_INVALID_OPCODE(); \
12690 } while (0)
12691
12692/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12693 * 64-bit mode. */
12694#define IEMOP_HLP_NO_64BIT() \
12695 do \
12696 { \
12697 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12698 return IEMOP_RAISE_INVALID_OPCODE(); \
12699 } while (0)
12700
12701/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12702 * 64-bit mode. */
12703#define IEMOP_HLP_ONLY_64BIT() \
12704 do \
12705 { \
12706 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12707 return IEMOP_RAISE_INVALID_OPCODE(); \
12708 } while (0)
12709
12710/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12711#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12712 do \
12713 { \
12714 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12715 iemRecalEffOpSize64Default(pVCpu); \
12716 } while (0)
12717
12718/** The instruction has 64-bit operand size if 64-bit mode. */
12719#define IEMOP_HLP_64BIT_OP_SIZE() \
12720 do \
12721 { \
12722 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12723 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12724 } while (0)
12725
12726/** Only a REX prefix immediately preceding the first opcode byte takes
12727 * effect. This macro helps ensure this and logs bad guest code. */
12728#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12729 do \
12730 { \
12731 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12732 { \
12733 Log5((a_szPrf ": Overriding REX prefix at %RX64! fPrefixes=%#x\n", \
12734 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12735 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12736 pVCpu->iem.s.uRexB = 0; \
12737 pVCpu->iem.s.uRexIndex = 0; \
12738 pVCpu->iem.s.uRexReg = 0; \
12739 iemRecalEffOpSize(pVCpu); \
12740 } \
12741 } while (0)
12742
12743/**
12744 * Done decoding.
12745 */
12746#define IEMOP_HLP_DONE_DECODING() \
12747 do \
12748 { \
12749 /*nothing for now, maybe later... */ \
12750 } while (0)
12751
12752/**
12753 * Done decoding, raise \#UD exception if lock prefix present.
12754 */
12755#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12756 do \
12757 { \
12758 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12759 { /* likely */ } \
12760 else \
12761 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12762 } while (0)
12763
12764
12765/**
12766 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12767 * repnz or size prefixes are present, or if in real or v8086 mode.
12768 */
12769#define IEMOP_HLP_DONE_VEX_DECODING() \
12770 do \
12771 { \
12772 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12773 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12774 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12775 { /* likely */ } \
12776 else \
12777 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12778 } while (0)
12779
12780/**
12781 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12782 * repnz or size prefixes are present, if VEX.L is not zero, or if in real or v8086 mode.
12783 */
12784#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12785 do \
12786 { \
12787 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12788 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12789 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12790 && pVCpu->iem.s.uVexLength == 0)) \
12791 { /* likely */ } \
12792 else \
12793 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12794 } while (0)
12795
12796
12797/**
12798 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12799 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12800 * register 0, or if in real or v8086 mode.
12801 */
12802#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12803 do \
12804 { \
12805 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12806 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12807 && !pVCpu->iem.s.uVex3rdReg \
12808 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12809 { /* likely */ } \
12810 else \
12811 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12812 } while (0)
12813
12814/**
12815 * Done decoding VEX, no V, L=0.
12816 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12817 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12818 */
12819#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12820 do \
12821 { \
12822 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12823 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12824 && pVCpu->iem.s.uVexLength == 0 \
12825 && pVCpu->iem.s.uVex3rdReg == 0 \
12826 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12827 { /* likely */ } \
12828 else \
12829 return IEMOP_RAISE_INVALID_OPCODE(); \
12830 } while (0)
12831
12832#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12833 do \
12834 { \
12835 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12836 { /* likely */ } \
12837 else \
12838 { \
12839 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12840 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12841 } \
12842 } while (0)
12843#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12844 do \
12845 { \
12846 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12847 { /* likely */ } \
12848 else \
12849 { \
12850 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12851 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12852 } \
12853 } while (0)
12854
12855/**
12856 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12857 * are present.
12858 */
12859#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12860 do \
12861 { \
12862 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12863 { /* likely */ } \
12864 else \
12865 return IEMOP_RAISE_INVALID_OPCODE(); \
12866 } while (0)
12867
12868
12869#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
12870/** Checks for and handles the SVM nested-guest instruction intercept, updating
12871 * the NRIP if needed. */
12872# define IEMOP_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12873 do \
12874 { \
12875 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12876 { \
12877 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
12878 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12879 } \
12880 } while (0)
12881
12882/** Checks and handles the SVM nested-guest read intercept for the given control register. */
12883# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12884 do \
12885 { \
12886 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12887 { \
12888 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
12889 IEM_RETURN_SVM_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12890 } \
12891 } while (0)
12892
12893#else /* !VBOX_WITH_NESTED_HWVIRT_SVM */
12894# define IEMOP_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12895# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12896#endif /* !VBOX_WITH_NESTED_HWVIRT_SVM */
12897
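/* Usage sketch (illustrative only): an RDTSC style decoder would check its
 * intercept with something along the lines of
 *
 *     IEMOP_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_RDTSC,
 *                                            SVM_EXIT_RDTSC, 0, 0);
 *
 * where the SVM_CTRL_INTERCEPT_RDTSC and SVM_EXIT_RDTSC constants are assumed
 * from the SVM headers.  When the nested guest intercepts the instruction this
 * performs a \#VMEXIT; otherwise, and in builds without
 * VBOX_WITH_NESTED_HWVIRT_SVM, it does nothing.
 */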
12898
12899/**
12900 * Calculates the effective address of a ModR/M memory operand.
12901 *
12902 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12903 *
12904 * @return Strict VBox status code.
12905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12906 * @param bRm The ModRM byte.
12907 * @param cbImm The size of any immediate following the
12908 * effective address opcode bytes. Important for
12909 * RIP relative addressing.
12910 * @param pGCPtrEff Where to return the effective address.
12911 */
12912IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12913{
12914 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12915 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12916# define SET_SS_DEF() \
12917 do \
12918 { \
12919 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12920 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12921 } while (0)
12922
12923 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12924 {
12925/** @todo Check the effective address size crap! */
12926 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12927 {
12928 uint16_t u16EffAddr;
12929
12930 /* Handle the disp16 form with no registers first. */
12931 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12932 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12933 else
12934 {
12935 /* Get the displacement. */
12936 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12937 {
12938 case 0: u16EffAddr = 0; break;
12939 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12940 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12941 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12942 }
12943
12944 /* Add the base and index registers to the disp. */
12945 switch (bRm & X86_MODRM_RM_MASK)
12946 {
12947 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12948 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12949 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12950 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12951 case 4: u16EffAddr += pCtx->si; break;
12952 case 5: u16EffAddr += pCtx->di; break;
12953 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12954 case 7: u16EffAddr += pCtx->bx; break;
12955 }
12956 }
12957
12958 *pGCPtrEff = u16EffAddr;
12959 }
12960 else
12961 {
12962 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12963 uint32_t u32EffAddr;
12964
12965 /* Handle the disp32 form with no registers first. */
12966 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12967 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12968 else
12969 {
12970 /* Get the register (or SIB) value. */
12971 switch ((bRm & X86_MODRM_RM_MASK))
12972 {
12973 case 0: u32EffAddr = pCtx->eax; break;
12974 case 1: u32EffAddr = pCtx->ecx; break;
12975 case 2: u32EffAddr = pCtx->edx; break;
12976 case 3: u32EffAddr = pCtx->ebx; break;
12977 case 4: /* SIB */
12978 {
12979 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12980
12981 /* Get the index and scale it. */
12982 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12983 {
12984 case 0: u32EffAddr = pCtx->eax; break;
12985 case 1: u32EffAddr = pCtx->ecx; break;
12986 case 2: u32EffAddr = pCtx->edx; break;
12987 case 3: u32EffAddr = pCtx->ebx; break;
12988 case 4: u32EffAddr = 0; /*none */ break;
12989 case 5: u32EffAddr = pCtx->ebp; break;
12990 case 6: u32EffAddr = pCtx->esi; break;
12991 case 7: u32EffAddr = pCtx->edi; break;
12992 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12993 }
12994 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12995
12996 /* add base */
12997 switch (bSib & X86_SIB_BASE_MASK)
12998 {
12999 case 0: u32EffAddr += pCtx->eax; break;
13000 case 1: u32EffAddr += pCtx->ecx; break;
13001 case 2: u32EffAddr += pCtx->edx; break;
13002 case 3: u32EffAddr += pCtx->ebx; break;
13003 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13004 case 5:
13005 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13006 {
13007 u32EffAddr += pCtx->ebp;
13008 SET_SS_DEF();
13009 }
13010 else
13011 {
13012 uint32_t u32Disp;
13013 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13014 u32EffAddr += u32Disp;
13015 }
13016 break;
13017 case 6: u32EffAddr += pCtx->esi; break;
13018 case 7: u32EffAddr += pCtx->edi; break;
13019 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13020 }
13021 break;
13022 }
13023 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13024 case 6: u32EffAddr = pCtx->esi; break;
13025 case 7: u32EffAddr = pCtx->edi; break;
13026 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13027 }
13028
13029 /* Get and add the displacement. */
13030 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13031 {
13032 case 0:
13033 break;
13034 case 1:
13035 {
13036 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13037 u32EffAddr += i8Disp;
13038 break;
13039 }
13040 case 2:
13041 {
13042 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13043 u32EffAddr += u32Disp;
13044 break;
13045 }
13046 default:
13047 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13048 }
13049
13050 }
13051 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13052 *pGCPtrEff = u32EffAddr;
13053 else
13054 {
13055 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13056 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13057 }
13058 }
13059 }
13060 else
13061 {
13062 uint64_t u64EffAddr;
13063
13064 /* Handle the rip+disp32 form with no registers first. */
13065 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13066 {
13067 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13068 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13069 }
13070 else
13071 {
13072 /* Get the register (or SIB) value. */
13073 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13074 {
13075 case 0: u64EffAddr = pCtx->rax; break;
13076 case 1: u64EffAddr = pCtx->rcx; break;
13077 case 2: u64EffAddr = pCtx->rdx; break;
13078 case 3: u64EffAddr = pCtx->rbx; break;
13079 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13080 case 6: u64EffAddr = pCtx->rsi; break;
13081 case 7: u64EffAddr = pCtx->rdi; break;
13082 case 8: u64EffAddr = pCtx->r8; break;
13083 case 9: u64EffAddr = pCtx->r9; break;
13084 case 10: u64EffAddr = pCtx->r10; break;
13085 case 11: u64EffAddr = pCtx->r11; break;
13086 case 13: u64EffAddr = pCtx->r13; break;
13087 case 14: u64EffAddr = pCtx->r14; break;
13088 case 15: u64EffAddr = pCtx->r15; break;
13089 /* SIB */
13090 case 4:
13091 case 12:
13092 {
13093 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13094
13095 /* Get the index and scale it. */
13096 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13097 {
13098 case 0: u64EffAddr = pCtx->rax; break;
13099 case 1: u64EffAddr = pCtx->rcx; break;
13100 case 2: u64EffAddr = pCtx->rdx; break;
13101 case 3: u64EffAddr = pCtx->rbx; break;
13102 case 4: u64EffAddr = 0; /*none */ break;
13103 case 5: u64EffAddr = pCtx->rbp; break;
13104 case 6: u64EffAddr = pCtx->rsi; break;
13105 case 7: u64EffAddr = pCtx->rdi; break;
13106 case 8: u64EffAddr = pCtx->r8; break;
13107 case 9: u64EffAddr = pCtx->r9; break;
13108 case 10: u64EffAddr = pCtx->r10; break;
13109 case 11: u64EffAddr = pCtx->r11; break;
13110 case 12: u64EffAddr = pCtx->r12; break;
13111 case 13: u64EffAddr = pCtx->r13; break;
13112 case 14: u64EffAddr = pCtx->r14; break;
13113 case 15: u64EffAddr = pCtx->r15; break;
13114 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13115 }
13116 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13117
13118 /* add base */
13119 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13120 {
13121 case 0: u64EffAddr += pCtx->rax; break;
13122 case 1: u64EffAddr += pCtx->rcx; break;
13123 case 2: u64EffAddr += pCtx->rdx; break;
13124 case 3: u64EffAddr += pCtx->rbx; break;
13125 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13126 case 6: u64EffAddr += pCtx->rsi; break;
13127 case 7: u64EffAddr += pCtx->rdi; break;
13128 case 8: u64EffAddr += pCtx->r8; break;
13129 case 9: u64EffAddr += pCtx->r9; break;
13130 case 10: u64EffAddr += pCtx->r10; break;
13131 case 11: u64EffAddr += pCtx->r11; break;
13132 case 12: u64EffAddr += pCtx->r12; break;
13133 case 14: u64EffAddr += pCtx->r14; break;
13134 case 15: u64EffAddr += pCtx->r15; break;
13135 /* complicated encodings */
13136 case 5:
13137 case 13:
13138 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13139 {
13140 if (!pVCpu->iem.s.uRexB)
13141 {
13142 u64EffAddr += pCtx->rbp;
13143 SET_SS_DEF();
13144 }
13145 else
13146 u64EffAddr += pCtx->r13;
13147 }
13148 else
13149 {
13150 uint32_t u32Disp;
13151 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13152 u64EffAddr += (int32_t)u32Disp;
13153 }
13154 break;
13155 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13156 }
13157 break;
13158 }
13159 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13160 }
13161
13162 /* Get and add the displacement. */
13163 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13164 {
13165 case 0:
13166 break;
13167 case 1:
13168 {
13169 int8_t i8Disp;
13170 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13171 u64EffAddr += i8Disp;
13172 break;
13173 }
13174 case 2:
13175 {
13176 uint32_t u32Disp;
13177 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13178 u64EffAddr += (int32_t)u32Disp;
13179 break;
13180 }
13181 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13182 }
13183
13184 }
13185
13186 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13187 *pGCPtrEff = u64EffAddr;
13188 else
13189 {
13190 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13191 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13192 }
13193 }
13194
13195 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13196 return VINF_SUCCESS;
13197}
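/* Worked example of the 16-bit path above: for bRm=0x46 (mod=01, reg=000,
 * rm=110) the displacement byte is sign-extended and added to BP, so with a
 * disp8 of 0x10 the result is *pGCPtrEff = BP + 0x10 and SET_SS_DEF() makes SS
 * the default segment (no base or index from SI/DI/BX).  The special
 * combination mod=00/rm=110 instead fetches a raw disp16 and uses no base
 * register at all. */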
13198
13199
13200/**
13201 * Calculates the effective address of a ModR/M memory operand.
13202 *
13203 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13204 *
13205 * @return Strict VBox status code.
13206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13207 * @param bRm The ModRM byte.
13208 * @param cbImm The size of any immediate following the
13209 * effective address opcode bytes. Important for
13210 * RIP relative addressing.
13211 * @param pGCPtrEff Where to return the effective address.
13212 * @param offRsp Displacement added whenever ESP/RSP is used as the base register.
13213 */
13214IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13215{
13216 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13217 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13218# define SET_SS_DEF() \
13219 do \
13220 { \
13221 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13222 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13223 } while (0)
13224
13225 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13226 {
13227/** @todo Check the effective address size crap! */
13228 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13229 {
13230 uint16_t u16EffAddr;
13231
13232 /* Handle the disp16 form with no registers first. */
13233 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13234 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13235 else
13236 {
13237 /* Get the displacement. */
13238 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13239 {
13240 case 0: u16EffAddr = 0; break;
13241 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13242 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13243 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13244 }
13245
13246 /* Add the base and index registers to the disp. */
13247 switch (bRm & X86_MODRM_RM_MASK)
13248 {
13249 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13250 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13251 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13252 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13253 case 4: u16EffAddr += pCtx->si; break;
13254 case 5: u16EffAddr += pCtx->di; break;
13255 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13256 case 7: u16EffAddr += pCtx->bx; break;
13257 }
13258 }
13259
13260 *pGCPtrEff = u16EffAddr;
13261 }
13262 else
13263 {
13264 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13265 uint32_t u32EffAddr;
13266
13267 /* Handle the disp32 form with no registers first. */
13268 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13269 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13270 else
13271 {
13272 /* Get the register (or SIB) value. */
13273 switch ((bRm & X86_MODRM_RM_MASK))
13274 {
13275 case 0: u32EffAddr = pCtx->eax; break;
13276 case 1: u32EffAddr = pCtx->ecx; break;
13277 case 2: u32EffAddr = pCtx->edx; break;
13278 case 3: u32EffAddr = pCtx->ebx; break;
13279 case 4: /* SIB */
13280 {
13281 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13282
13283 /* Get the index and scale it. */
13284 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13285 {
13286 case 0: u32EffAddr = pCtx->eax; break;
13287 case 1: u32EffAddr = pCtx->ecx; break;
13288 case 2: u32EffAddr = pCtx->edx; break;
13289 case 3: u32EffAddr = pCtx->ebx; break;
13290 case 4: u32EffAddr = 0; /*none */ break;
13291 case 5: u32EffAddr = pCtx->ebp; break;
13292 case 6: u32EffAddr = pCtx->esi; break;
13293 case 7: u32EffAddr = pCtx->edi; break;
13294 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13295 }
13296 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13297
13298 /* add base */
13299 switch (bSib & X86_SIB_BASE_MASK)
13300 {
13301 case 0: u32EffAddr += pCtx->eax; break;
13302 case 1: u32EffAddr += pCtx->ecx; break;
13303 case 2: u32EffAddr += pCtx->edx; break;
13304 case 3: u32EffAddr += pCtx->ebx; break;
13305 case 4:
13306 u32EffAddr += pCtx->esp + offRsp;
13307 SET_SS_DEF();
13308 break;
13309 case 5:
13310 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13311 {
13312 u32EffAddr += pCtx->ebp;
13313 SET_SS_DEF();
13314 }
13315 else
13316 {
13317 uint32_t u32Disp;
13318 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13319 u32EffAddr += u32Disp;
13320 }
13321 break;
13322 case 6: u32EffAddr += pCtx->esi; break;
13323 case 7: u32EffAddr += pCtx->edi; break;
13324 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13325 }
13326 break;
13327 }
13328 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13329 case 6: u32EffAddr = pCtx->esi; break;
13330 case 7: u32EffAddr = pCtx->edi; break;
13331 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13332 }
13333
13334 /* Get and add the displacement. */
13335 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13336 {
13337 case 0:
13338 break;
13339 case 1:
13340 {
13341 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13342 u32EffAddr += i8Disp;
13343 break;
13344 }
13345 case 2:
13346 {
13347 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13348 u32EffAddr += u32Disp;
13349 break;
13350 }
13351 default:
13352 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13353 }
13354
13355 }
13356 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13357 *pGCPtrEff = u32EffAddr;
13358 else
13359 {
13360 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13361 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13362 }
13363 }
13364 }
13365 else
13366 {
13367 uint64_t u64EffAddr;
13368
13369 /* Handle the rip+disp32 form with no registers first. */
13370 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13371 {
13372 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
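                /* Note: RIP-relative displacements are relative to the end of the
                   instruction, hence the instruction length decoded so far plus the
                   size of any trailing immediate (cbImm) is added to RIP on the next line. */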
13373 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13374 }
13375 else
13376 {
13377 /* Get the register (or SIB) value. */
13378 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13379 {
13380 case 0: u64EffAddr = pCtx->rax; break;
13381 case 1: u64EffAddr = pCtx->rcx; break;
13382 case 2: u64EffAddr = pCtx->rdx; break;
13383 case 3: u64EffAddr = pCtx->rbx; break;
13384 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13385 case 6: u64EffAddr = pCtx->rsi; break;
13386 case 7: u64EffAddr = pCtx->rdi; break;
13387 case 8: u64EffAddr = pCtx->r8; break;
13388 case 9: u64EffAddr = pCtx->r9; break;
13389 case 10: u64EffAddr = pCtx->r10; break;
13390 case 11: u64EffAddr = pCtx->r11; break;
13391 case 13: u64EffAddr = pCtx->r13; break;
13392 case 14: u64EffAddr = pCtx->r14; break;
13393 case 15: u64EffAddr = pCtx->r15; break;
13394 /* SIB */
13395 case 4:
13396 case 12:
13397 {
13398 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13399
13400 /* Get the index and scale it. */
13401 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13402 {
13403 case 0: u64EffAddr = pCtx->rax; break;
13404 case 1: u64EffAddr = pCtx->rcx; break;
13405 case 2: u64EffAddr = pCtx->rdx; break;
13406 case 3: u64EffAddr = pCtx->rbx; break;
13407 case 4: u64EffAddr = 0; /*none */ break;
13408 case 5: u64EffAddr = pCtx->rbp; break;
13409 case 6: u64EffAddr = pCtx->rsi; break;
13410 case 7: u64EffAddr = pCtx->rdi; break;
13411 case 8: u64EffAddr = pCtx->r8; break;
13412 case 9: u64EffAddr = pCtx->r9; break;
13413 case 10: u64EffAddr = pCtx->r10; break;
13414 case 11: u64EffAddr = pCtx->r11; break;
13415 case 12: u64EffAddr = pCtx->r12; break;
13416 case 13: u64EffAddr = pCtx->r13; break;
13417 case 14: u64EffAddr = pCtx->r14; break;
13418 case 15: u64EffAddr = pCtx->r15; break;
13419 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13420 }
13421 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13422
13423 /* add base */
13424 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13425 {
13426 case 0: u64EffAddr += pCtx->rax; break;
13427 case 1: u64EffAddr += pCtx->rcx; break;
13428 case 2: u64EffAddr += pCtx->rdx; break;
13429 case 3: u64EffAddr += pCtx->rbx; break;
13430 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
13431 case 6: u64EffAddr += pCtx->rsi; break;
13432 case 7: u64EffAddr += pCtx->rdi; break;
13433 case 8: u64EffAddr += pCtx->r8; break;
13434 case 9: u64EffAddr += pCtx->r9; break;
13435 case 10: u64EffAddr += pCtx->r10; break;
13436 case 11: u64EffAddr += pCtx->r11; break;
13437 case 12: u64EffAddr += pCtx->r12; break;
13438 case 14: u64EffAddr += pCtx->r14; break;
13439 case 15: u64EffAddr += pCtx->r15; break;
13440 /* complicated encodings */
13441 case 5:
13442 case 13:
13443 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13444 {
13445 if (!pVCpu->iem.s.uRexB)
13446 {
13447 u64EffAddr += pCtx->rbp;
13448 SET_SS_DEF();
13449 }
13450 else
13451 u64EffAddr += pCtx->r13;
13452 }
13453 else
13454 {
13455 uint32_t u32Disp;
13456 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13457 u64EffAddr += (int32_t)u32Disp;
13458 }
13459 break;
13460 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13461 }
13462 break;
13463 }
13464 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13465 }
13466
13467 /* Get and add the displacement. */
13468 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13469 {
13470 case 0:
13471 break;
13472 case 1:
13473 {
13474 int8_t i8Disp;
13475 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13476 u64EffAddr += i8Disp;
13477 break;
13478 }
13479 case 2:
13480 {
13481 uint32_t u32Disp;
13482 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13483 u64EffAddr += (int32_t)u32Disp;
13484 break;
13485 }
13486 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13487 }
13488
13489 }
13490
13491 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13492 *pGCPtrEff = u64EffAddr;
13493 else
13494 {
13495 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13496 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13497 }
13498 }
13499
13500 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13501 return VINF_SUCCESS;
13502}
13503
13504
13505#ifdef IEM_WITH_SETJMP
13506/**
13507 * Calculates the effective address of a ModR/M memory operand.
13508 *
13509 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13510 *
13511 * May longjmp on internal error.
13512 *
13513 * @return The effective address.
13514 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13515 * @param bRm The ModRM byte.
13516 * @param cbImm The size of any immediate following the
13517 * effective address opcode bytes. Important for
13518 * RIP relative addressing.
13519 */
13520IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13521{
13522 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13523 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13524# define SET_SS_DEF() \
13525 do \
13526 { \
13527 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13528 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13529 } while (0)
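    /* Note: addressing forms that use (E/R)BP or (E/R)SP as the base default to the
       SS segment (e.g. 'mov ax, [bp+si]' is an SS access unless a segment prefix
       overrides it); SET_SS_DEF records that default for the forms below. */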
13530
13531 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13532 {
13533/** @todo Check the effective address size crap! */
13534 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13535 {
13536 uint16_t u16EffAddr;
13537
13538 /* Handle the disp16 form with no registers first. */
13539 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13540 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13541 else
13542 {
13543                /* Get the displacement. */
13544 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13545 {
13546 case 0: u16EffAddr = 0; break;
13547 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13548 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13549 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13550 }
13551
13552 /* Add the base and index registers to the disp. */
13553 switch (bRm & X86_MODRM_RM_MASK)
13554 {
13555 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13556 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13557 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13558 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13559 case 4: u16EffAddr += pCtx->si; break;
13560 case 5: u16EffAddr += pCtx->di; break;
13561 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13562 case 7: u16EffAddr += pCtx->bx; break;
13563 }
13564 }
13565
13566 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13567 return u16EffAddr;
13568 }
13569
13570 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13571 uint32_t u32EffAddr;
13572
13573 /* Handle the disp32 form with no registers first. */
13574 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13575 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13576 else
13577 {
13578 /* Get the register (or SIB) value. */
13579 switch ((bRm & X86_MODRM_RM_MASK))
13580 {
13581 case 0: u32EffAddr = pCtx->eax; break;
13582 case 1: u32EffAddr = pCtx->ecx; break;
13583 case 2: u32EffAddr = pCtx->edx; break;
13584 case 3: u32EffAddr = pCtx->ebx; break;
13585 case 4: /* SIB */
13586 {
13587 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13588
13589 /* Get the index and scale it. */
13590 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13591 {
13592 case 0: u32EffAddr = pCtx->eax; break;
13593 case 1: u32EffAddr = pCtx->ecx; break;
13594 case 2: u32EffAddr = pCtx->edx; break;
13595 case 3: u32EffAddr = pCtx->ebx; break;
13596 case 4: u32EffAddr = 0; /*none */ break;
13597 case 5: u32EffAddr = pCtx->ebp; break;
13598 case 6: u32EffAddr = pCtx->esi; break;
13599 case 7: u32EffAddr = pCtx->edi; break;
13600 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13601 }
13602 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13603
13604 /* add base */
13605 switch (bSib & X86_SIB_BASE_MASK)
13606 {
13607 case 0: u32EffAddr += pCtx->eax; break;
13608 case 1: u32EffAddr += pCtx->ecx; break;
13609 case 2: u32EffAddr += pCtx->edx; break;
13610 case 3: u32EffAddr += pCtx->ebx; break;
13611 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13612 case 5:
13613 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13614 {
13615 u32EffAddr += pCtx->ebp;
13616 SET_SS_DEF();
13617 }
13618 else
13619 {
13620 uint32_t u32Disp;
13621 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13622 u32EffAddr += u32Disp;
13623 }
13624 break;
13625 case 6: u32EffAddr += pCtx->esi; break;
13626 case 7: u32EffAddr += pCtx->edi; break;
13627 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13628 }
13629 break;
13630 }
13631 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13632 case 6: u32EffAddr = pCtx->esi; break;
13633 case 7: u32EffAddr = pCtx->edi; break;
13634 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13635 }
13636
13637 /* Get and add the displacement. */
13638 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13639 {
13640 case 0:
13641 break;
13642 case 1:
13643 {
13644 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13645 u32EffAddr += i8Disp;
13646 break;
13647 }
13648 case 2:
13649 {
13650 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13651 u32EffAddr += u32Disp;
13652 break;
13653 }
13654 default:
13655 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13656 }
13657 }
13658
13659 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13660 {
13661 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13662 return u32EffAddr;
13663 }
13664 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13665 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13666 return u32EffAddr & UINT16_MAX;
13667 }
13668
13669 uint64_t u64EffAddr;
13670
13671 /* Handle the rip+disp32 form with no registers first. */
13672 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13673 {
13674 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13675 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13676 }
13677 else
13678 {
13679 /* Get the register (or SIB) value. */
13680 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13681 {
13682 case 0: u64EffAddr = pCtx->rax; break;
13683 case 1: u64EffAddr = pCtx->rcx; break;
13684 case 2: u64EffAddr = pCtx->rdx; break;
13685 case 3: u64EffAddr = pCtx->rbx; break;
13686 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13687 case 6: u64EffAddr = pCtx->rsi; break;
13688 case 7: u64EffAddr = pCtx->rdi; break;
13689 case 8: u64EffAddr = pCtx->r8; break;
13690 case 9: u64EffAddr = pCtx->r9; break;
13691 case 10: u64EffAddr = pCtx->r10; break;
13692 case 11: u64EffAddr = pCtx->r11; break;
13693 case 13: u64EffAddr = pCtx->r13; break;
13694 case 14: u64EffAddr = pCtx->r14; break;
13695 case 15: u64EffAddr = pCtx->r15; break;
13696 /* SIB */
13697 case 4:
13698 case 12:
13699 {
13700 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13701
13702 /* Get the index and scale it. */
13703 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13704 {
13705 case 0: u64EffAddr = pCtx->rax; break;
13706 case 1: u64EffAddr = pCtx->rcx; break;
13707 case 2: u64EffAddr = pCtx->rdx; break;
13708 case 3: u64EffAddr = pCtx->rbx; break;
13709 case 4: u64EffAddr = 0; /*none */ break;
13710 case 5: u64EffAddr = pCtx->rbp; break;
13711 case 6: u64EffAddr = pCtx->rsi; break;
13712 case 7: u64EffAddr = pCtx->rdi; break;
13713 case 8: u64EffAddr = pCtx->r8; break;
13714 case 9: u64EffAddr = pCtx->r9; break;
13715 case 10: u64EffAddr = pCtx->r10; break;
13716 case 11: u64EffAddr = pCtx->r11; break;
13717 case 12: u64EffAddr = pCtx->r12; break;
13718 case 13: u64EffAddr = pCtx->r13; break;
13719 case 14: u64EffAddr = pCtx->r14; break;
13720 case 15: u64EffAddr = pCtx->r15; break;
13721 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13722 }
13723 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13724
13725 /* add base */
13726 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13727 {
13728 case 0: u64EffAddr += pCtx->rax; break;
13729 case 1: u64EffAddr += pCtx->rcx; break;
13730 case 2: u64EffAddr += pCtx->rdx; break;
13731 case 3: u64EffAddr += pCtx->rbx; break;
13732 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13733 case 6: u64EffAddr += pCtx->rsi; break;
13734 case 7: u64EffAddr += pCtx->rdi; break;
13735 case 8: u64EffAddr += pCtx->r8; break;
13736 case 9: u64EffAddr += pCtx->r9; break;
13737 case 10: u64EffAddr += pCtx->r10; break;
13738 case 11: u64EffAddr += pCtx->r11; break;
13739 case 12: u64EffAddr += pCtx->r12; break;
13740 case 14: u64EffAddr += pCtx->r14; break;
13741 case 15: u64EffAddr += pCtx->r15; break;
13742 /* complicated encodings */
13743 case 5:
13744 case 13:
13745 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13746 {
13747 if (!pVCpu->iem.s.uRexB)
13748 {
13749 u64EffAddr += pCtx->rbp;
13750 SET_SS_DEF();
13751 }
13752 else
13753 u64EffAddr += pCtx->r13;
13754 }
13755 else
13756 {
13757 uint32_t u32Disp;
13758 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13759 u64EffAddr += (int32_t)u32Disp;
13760 }
13761 break;
13762 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13763 }
13764 break;
13765 }
13766 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13767 }
13768
13769 /* Get and add the displacement. */
13770 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13771 {
13772 case 0:
13773 break;
13774 case 1:
13775 {
13776 int8_t i8Disp;
13777 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13778 u64EffAddr += i8Disp;
13779 break;
13780 }
13781 case 2:
13782 {
13783 uint32_t u32Disp;
13784 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13785 u64EffAddr += (int32_t)u32Disp;
13786 break;
13787 }
13788 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13789 }
13790
13791 }
13792
13793 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13794 {
13795 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13796 return u64EffAddr;
13797 }
13798 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13799 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13800 return u64EffAddr & UINT32_MAX;
13801}
13802#endif /* IEM_WITH_SETJMP */
13803
13804
13805/** @} */
13806
13807
13808
13809/*
13810 * Include the instructions
13811 */
13812#include "IEMAllInstructions.cpp.h"
13813
13814
13815
13816
13817#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13818
13819/**
13820 * Sets up execution verification mode.
13821 */
13822IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13823{
13825 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13826
13827 /*
13828 * Always note down the address of the current instruction.
13829 */
13830 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13831 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13832
13833 /*
13834 * Enable verification and/or logging.
13835 */
13836    bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
13837 if ( fNewNoRem
13838 && ( 0
13839#if 0 /* auto enable on first paged protected mode interrupt */
13840 || ( pOrgCtx->eflags.Bits.u1IF
13841 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13842 && TRPMHasTrap(pVCpu)
13843 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13844#endif
13845#if 0
13846                 || (   pOrgCtx->cs.Sel == 0x10
13847                     && (   pOrgCtx->rip == 0x90119e3e
13848                         || pOrgCtx->rip == 0x901d9810))
13849#endif
13850#if 0 /* Auto enable DSL - FPU stuff. */
13851                 || (   pOrgCtx->cs.Sel == 0x10
13852 && (// pOrgCtx->rip == 0xc02ec07f
13853 //|| pOrgCtx->rip == 0xc02ec082
13854 //|| pOrgCtx->rip == 0xc02ec0c9
13855 0
13856 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13857#endif
13858#if 0 /* Auto enable DSL - fstp st0 stuff. */
13859                 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13860#endif
13861#if 0
13862 || pOrgCtx->rip == 0x9022bb3a
13863#endif
13864#if 0
13865 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13866#endif
13867#if 0
13868 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13869 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13870#endif
13871#if 0 /* NT4SP1 - later on the blue screen, things go wrong... */
13872 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13873 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13874 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13875#endif
13876#if 0 /* NT4SP1 - xadd early boot. */
13877 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13878#endif
13879#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13880 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13881#endif
13882#if 0 /* NT4SP1 - cmpxchg (AMD). */
13883 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13884#endif
13885#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13886 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13887#endif
13888#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13889 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13890
13891#endif
13892#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13893 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13894
13895#endif
13896#if 0 /* NT4SP1 - frstor [ecx] */
13897 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13898#endif
13899#if 0 /* xxxxxx - All long mode code. */
13900 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13901#endif
13902#if 0 /* rep movsq linux 3.7 64-bit boot. */
13903 || (pOrgCtx->rip == 0x0000000000100241)
13904#endif
13905#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13906 || (pOrgCtx->rip == 0x000000000215e240)
13907#endif
13908#if 0 /* DOS's size-overridden iret to v8086. */
13909 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13910#endif
13911 )
13912 )
13913 {
13914 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13915 RTLogFlags(NULL, "enabled");
13916 fNewNoRem = false;
13917 }
13918 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13919 {
13920 pVCpu->iem.s.fNoRem = fNewNoRem;
13921 if (!fNewNoRem)
13922 {
13923 LogAlways(("Enabling verification mode!\n"));
13924 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13925 }
13926 else
13927 LogAlways(("Disabling verification mode!\n"));
13928 }
13929
13930 /*
13931 * Switch state.
13932 */
13933 if (IEM_VERIFICATION_ENABLED(pVCpu))
13934 {
13935 static CPUMCTX s_DebugCtx; /* Ugly! */
13936
13937 s_DebugCtx = *pOrgCtx;
13938 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13939 }
13940
13941 /*
13942 * See if there is an interrupt pending in TRPM and inject it if we can.
13943 */
13944 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13945 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
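    /* With nested SVM hw.virt the interruptibility check also involves the global
       interrupt flag (GIF) and, when in nested-guest mode, the nested-guest SVM
       interrupt controls (CPUMCanSvmNstGstTakePhysIntr) rather than just EFLAGS.IF. */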
13946#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13947 bool fIntrEnabled = pOrgCtx->hwvirt.Gif;
13948 if (fIntrEnabled)
13949 {
13950        if (CPUMIsGuestInSvmNestedHwVirtMode(pOrgCtx))
13951            fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pOrgCtx);
13952 else
13953 fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13954 }
13955#else
13956 bool fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;
13957#endif
13958 if ( fIntrEnabled
13959 && TRPMHasTrap(pVCpu)
13960 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13961 {
13962 uint8_t u8TrapNo;
13963 TRPMEVENT enmType;
13964 RTGCUINT uErrCode;
13965 RTGCPTR uCr2;
13966 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13967 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13968 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13969 TRPMResetTrap(pVCpu);
13970 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13971 }
13972
13973 /*
13974 * Reset the counters.
13975 */
13976 pVCpu->iem.s.cIOReads = 0;
13977 pVCpu->iem.s.cIOWrites = 0;
13978 pVCpu->iem.s.fIgnoreRaxRdx = false;
13979 pVCpu->iem.s.fOverlappingMovs = false;
13980 pVCpu->iem.s.fProblematicMemory = false;
13981 pVCpu->iem.s.fUndefinedEFlags = 0;
13982
13983 if (IEM_VERIFICATION_ENABLED(pVCpu))
13984 {
13985 /*
13986 * Free all verification records.
13987 */
13988 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13989 pVCpu->iem.s.pIemEvtRecHead = NULL;
13990 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
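        /* Walk the IEM record list and then the 'other' record list, returning
           every record to the free list for reuse. */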
13991 do
13992 {
13993 while (pEvtRec)
13994 {
13995 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13996 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13997 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13998 pEvtRec = pNext;
13999 }
14000 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
14001 pVCpu->iem.s.pOtherEvtRecHead = NULL;
14002 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
14003 } while (pEvtRec);
14004 }
14005}
14006
14007
14008/**
14009 * Allocates an event record.
14010 * @returns Pointer to a record.
14011 */
14012IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
14013{
14014 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14015 return NULL;
14016
14017 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
14018 if (pEvtRec)
14019 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
14020 else
14021 {
14022 if (!pVCpu->iem.s.ppIemEvtRecNext)
14023 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
14024
14025 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
14026 if (!pEvtRec)
14027 return NULL;
14028 }
14029 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
14030 pEvtRec->pNext = NULL;
14031 return pEvtRec;
14032}
14033
14034
14035/**
14036 * IOMMMIORead notification.
14037 */
14038VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
14039{
14040 PVMCPU pVCpu = VMMGetCpu(pVM);
14041 if (!pVCpu)
14042 return;
14043 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14044 if (!pEvtRec)
14045 return;
14046 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
14047 pEvtRec->u.RamRead.GCPhys = GCPhys;
14048 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
14049 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14050 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14051}
14052
14053
14054/**
14055 * IOMMMIOWrite notification.
14056 */
14057VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
14058{
14059 PVMCPU pVCpu = VMMGetCpu(pVM);
14060 if (!pVCpu)
14061 return;
14062 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14063 if (!pEvtRec)
14064 return;
14065 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
14066 pEvtRec->u.RamWrite.GCPhys = GCPhys;
14067 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
14068 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
14069 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
14070 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
14071 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
14072 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14073 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14074}
14075
14076
14077/**
14078 * IOMIOPortRead notification.
14079 */
14080VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
14081{
14082 PVMCPU pVCpu = VMMGetCpu(pVM);
14083 if (!pVCpu)
14084 return;
14085 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14086 if (!pEvtRec)
14087 return;
14088 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14089 pEvtRec->u.IOPortRead.Port = Port;
14090 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14091 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14092 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14093}
14094
14095/**
14096 * IOMIOPortWrite notification.
14097 */
14098VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14099{
14100 PVMCPU pVCpu = VMMGetCpu(pVM);
14101 if (!pVCpu)
14102 return;
14103 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14104 if (!pEvtRec)
14105 return;
14106 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14107 pEvtRec->u.IOPortWrite.Port = Port;
14108 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14109 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14110 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14111 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14112}
14113
14114
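/**
 * I/O port string read notification.
 */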
14115VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
14116{
14117 PVMCPU pVCpu = VMMGetCpu(pVM);
14118 if (!pVCpu)
14119 return;
14120 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14121 if (!pEvtRec)
14122 return;
14123 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
14124 pEvtRec->u.IOPortStrRead.Port = Port;
14125 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
14126 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
14127 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14128 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14129}
14130
14131
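/**
 * I/O port string write notification.
 */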
14132VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
14133{
14134 PVMCPU pVCpu = VMMGetCpu(pVM);
14135 if (!pVCpu)
14136 return;
14137 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14138 if (!pEvtRec)
14139 return;
14140 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
14141 pEvtRec->u.IOPortStrWrite.Port = Port;
14142 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
14143 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
14144 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14145 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14146}
14147
14148
14149/**
14150 * Fakes and records an I/O port read.
14151 *
14152 * @returns VINF_SUCCESS.
14153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14154 * @param Port The I/O port.
14155 * @param pu32Value Where to store the fake value.
14156 * @param cbValue The size of the access.
14157 */
14158IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14159{
14160 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14161 if (pEvtRec)
14162 {
14163 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14164 pEvtRec->u.IOPortRead.Port = Port;
14165 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14166 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14167 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14168 }
14169 pVCpu->iem.s.cIOReads++;
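    /* Hand back an easily recognizable dummy value; no real I/O is performed here. */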
14170 *pu32Value = 0xcccccccc;
14171 return VINF_SUCCESS;
14172}
14173
14174
14175/**
14176 * Fakes and records an I/O port write.
14177 *
14178 * @returns VINF_SUCCESS.
14179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14180 * @param Port The I/O port.
14181 * @param u32Value The value being written.
14182 * @param cbValue The size of the access.
14183 */
14184IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14185{
14186 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14187 if (pEvtRec)
14188 {
14189 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14190 pEvtRec->u.IOPortWrite.Port = Port;
14191 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14192 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14193 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14194 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14195 }
14196 pVCpu->iem.s.cIOWrites++;
14197 return VINF_SUCCESS;
14198}
14199
14200
14201/**
14202 * Used to add extra details about a stub case.
14203 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14204 */
14205IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
14206{
14207 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14208 PVM pVM = pVCpu->CTX_SUFF(pVM);
14210 char szRegs[4096];
14211 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
14212 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
14213 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
14214 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
14215 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
14216 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
14217 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
14218 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
14219 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
14220 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
14221 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
14222 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
14223 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
14224 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
14225 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
14226 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
14227 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
14228 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
14229 " efer=%016VR{efer}\n"
14230 " pat=%016VR{pat}\n"
14231 " sf_mask=%016VR{sf_mask}\n"
14232 "krnl_gs_base=%016VR{krnl_gs_base}\n"
14233 " lstar=%016VR{lstar}\n"
14234 " star=%016VR{star} cstar=%016VR{cstar}\n"
14235 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
14236 );
14237
14238 char szInstr1[256];
14239 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
14240 DBGF_DISAS_FLAGS_DEFAULT_MODE,
14241 szInstr1, sizeof(szInstr1), NULL);
14242 char szInstr2[256];
14243 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
14244 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14245 szInstr2, sizeof(szInstr2), NULL);
14246
14247 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
14248}
14249
14250
14251/**
14252 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
14253 * dump to the assertion info.
14254 *
14255 * @param pEvtRec The record to dump.
14256 */
14257IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
14258{
14259 switch (pEvtRec->enmEvent)
14260 {
14261 case IEMVERIFYEVENT_IOPORT_READ:
14262 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
14263                            pEvtRec->u.IOPortRead.Port,
14264                            pEvtRec->u.IOPortRead.cbValue);
14265 break;
14266 case IEMVERIFYEVENT_IOPORT_WRITE:
14267 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
14268 pEvtRec->u.IOPortWrite.Port,
14269 pEvtRec->u.IOPortWrite.cbValue,
14270 pEvtRec->u.IOPortWrite.u32Value);
14271 break;
14272 case IEMVERIFYEVENT_IOPORT_STR_READ:
14273 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
14274                            pEvtRec->u.IOPortStrRead.Port,
14275                            pEvtRec->u.IOPortStrRead.cbValue,
14276                            pEvtRec->u.IOPortStrRead.cTransfers);
14277 break;
14278 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14279 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
14280 pEvtRec->u.IOPortStrWrite.Port,
14281 pEvtRec->u.IOPortStrWrite.cbValue,
14282 pEvtRec->u.IOPortStrWrite.cTransfers);
14283 break;
14284 case IEMVERIFYEVENT_RAM_READ:
14285 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
14286 pEvtRec->u.RamRead.GCPhys,
14287 pEvtRec->u.RamRead.cb);
14288 break;
14289 case IEMVERIFYEVENT_RAM_WRITE:
14290 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
14291 pEvtRec->u.RamWrite.GCPhys,
14292 pEvtRec->u.RamWrite.cb,
14293 (int)pEvtRec->u.RamWrite.cb,
14294 pEvtRec->u.RamWrite.ab);
14295 break;
14296 default:
14297 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
14298 break;
14299 }
14300}
14301
14302
14303/**
14304 * Raises an assertion on the specified records, showing the given message with
14305 * dumps of both records attached.
14306 *
14307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14308 * @param pEvtRec1 The first record.
14309 * @param pEvtRec2 The second record.
14310 * @param pszMsg The message explaining why we're asserting.
14311 */
14312IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
14313{
14314 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14315 iemVerifyAssertAddRecordDump(pEvtRec1);
14316 iemVerifyAssertAddRecordDump(pEvtRec2);
14317 iemVerifyAssertMsg2(pVCpu);
14318 RTAssertPanic();
14319}
14320
14321
14322/**
14323 * Raises an assertion on the specified record, showing the given message with
14324 * a record dump attached.
14325 *
14326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14327 * @param pEvtRec1 The first record.
14328 * @param   pEvtRec     The record.
14329 */
14330IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
14331{
14332 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14333 iemVerifyAssertAddRecordDump(pEvtRec);
14334 iemVerifyAssertMsg2(pVCpu);
14335 RTAssertPanic();
14336}
14337
14338
14339/**
14340 * Verifies a write record.
14341 *
14342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14343 * @param pEvtRec The write record.
14344 * @param   fRem        Set if REM was the other execution engine.  If clear,
14345 *                      it was HM.
14346 */
14347IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
14348{
14349 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
14350 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
14351 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
14352 if ( RT_FAILURE(rc)
14353 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
14354 {
14355 /* fend off ins */
14356 if ( !pVCpu->iem.s.cIOReads
14357 || pEvtRec->u.RamWrite.ab[0] != 0xcc
14358 || ( pEvtRec->u.RamWrite.cb != 1
14359 && pEvtRec->u.RamWrite.cb != 2
14360 && pEvtRec->u.RamWrite.cb != 4) )
14361 {
14362 /* fend off ROMs and MMIO */
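            /* (Presumably the legacy VGA/adapter ROM/BIOS area at 0xa0000-0xfffff and
               the firmware flash mapping in the top 256KB below 4GB.) */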
14363 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
14364 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
14365 {
14366 /* fend off fxsave */
14367 if (pEvtRec->u.RamWrite.cb != 512)
14368 {
14369 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
14370 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14371 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
14372 RTAssertMsg2Add("%s: %.*Rhxs\n"
14373 "iem: %.*Rhxs\n",
14374 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
14375 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
14376 iemVerifyAssertAddRecordDump(pEvtRec);
14377 iemVerifyAssertMsg2(pVCpu);
14378 RTAssertPanic();
14379 }
14380 }
14381 }
14382 }
14383
14384}
14385
14386/**
14387 * Performs the post-execution verification checks.
14388 */
14389IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
14390{
14391 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14392 return rcStrictIem;
14393
14394 /*
14395 * Switch back the state.
14396 */
14397 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
14398 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
14399 Assert(pOrgCtx != pDebugCtx);
14400 IEM_GET_CTX(pVCpu) = pOrgCtx;
14401
14402 /*
14403 * Execute the instruction in REM.
14404 */
14405 bool fRem = false;
14406 PVM pVM = pVCpu->CTX_SUFF(pVM);
14408 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
14409#ifdef IEM_VERIFICATION_MODE_FULL_HM
14410 if ( HMIsEnabled(pVM)
14411 && pVCpu->iem.s.cIOReads == 0
14412 && pVCpu->iem.s.cIOWrites == 0
14413 && !pVCpu->iem.s.fProblematicMemory)
14414 {
14415 uint64_t uStartRip = pOrgCtx->rip;
14416 unsigned iLoops = 0;
14417 do
14418 {
14419 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
14420 iLoops++;
14421 } while ( rc == VINF_SUCCESS
14422 || ( rc == VINF_EM_DBG_STEPPED
14423 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14424 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
14425 || ( pOrgCtx->rip != pDebugCtx->rip
14426 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
14427 && iLoops < 8) );
14428 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
14429 rc = VINF_SUCCESS;
14430 }
14431#endif
14432 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
14433 || rc == VINF_IOM_R3_IOPORT_READ
14434 || rc == VINF_IOM_R3_IOPORT_WRITE
14435 || rc == VINF_IOM_R3_MMIO_READ
14436 || rc == VINF_IOM_R3_MMIO_READ_WRITE
14437 || rc == VINF_IOM_R3_MMIO_WRITE
14438 || rc == VINF_CPUM_R3_MSR_READ
14439 || rc == VINF_CPUM_R3_MSR_WRITE
14440 || rc == VINF_EM_RESCHEDULE
14441 )
14442 {
14443 EMRemLock(pVM);
14444 rc = REMR3EmulateInstruction(pVM, pVCpu);
14445 AssertRC(rc);
14446 EMRemUnlock(pVM);
14447 fRem = true;
14448 }
14449
14450# if 1 /* Skip unimplemented instructions for now. */
14451 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14452 {
14453 IEM_GET_CTX(pVCpu) = pOrgCtx;
14454 if (rc == VINF_EM_DBG_STEPPED)
14455 return VINF_SUCCESS;
14456 return rc;
14457 }
14458# endif
14459
14460 /*
14461 * Compare the register states.
14462 */
14463 unsigned cDiffs = 0;
14464 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
14465 {
14466 //Log(("REM and IEM ends up with different registers!\n"));
14467 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
14468
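        /* The CHECK_* helpers below only log each difference and bump cDiffs, so
           that all mismatching fields are reported before the assertion in the
           'if (cDiffs != 0)' block at the end fires. */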
14469# define CHECK_FIELD(a_Field) \
14470 do \
14471 { \
14472 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14473 { \
14474 switch (sizeof(pOrgCtx->a_Field)) \
14475 { \
14476 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14477 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14478 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14479 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14480 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14481 } \
14482 cDiffs++; \
14483 } \
14484 } while (0)
14485# define CHECK_XSTATE_FIELD(a_Field) \
14486 do \
14487 { \
14488 if (pOrgXState->a_Field != pDebugXState->a_Field) \
14489 { \
14490 switch (sizeof(pOrgXState->a_Field)) \
14491 { \
14492 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14493 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14494 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14495 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14496 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14497 } \
14498 cDiffs++; \
14499 } \
14500 } while (0)
14501
14502# define CHECK_BIT_FIELD(a_Field) \
14503 do \
14504 { \
14505 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14506 { \
14507 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
14508 cDiffs++; \
14509 } \
14510 } while (0)
14511
14512# define CHECK_SEL(a_Sel) \
14513 do \
14514 { \
14515 CHECK_FIELD(a_Sel.Sel); \
14516 CHECK_FIELD(a_Sel.Attr.u); \
14517 CHECK_FIELD(a_Sel.u64Base); \
14518 CHECK_FIELD(a_Sel.u32Limit); \
14519 CHECK_FIELD(a_Sel.fFlags); \
14520 } while (0)
14521
14522 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
14523 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
14524
14525#if 1 /* The recompiler doesn't update these the intel way. */
14526 if (fRem)
14527 {
14528 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
14529 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
14530 pOrgXState->x87.CS = pDebugXState->x87.CS;
14531 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
14532 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
14533 pOrgXState->x87.DS = pDebugXState->x87.DS;
14534 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
14535 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
14536 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
14537 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
14538 }
14539#endif
14540 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
14541 {
14542 RTAssertMsg2Weak(" the FPU state differs\n");
14543 cDiffs++;
14544 CHECK_XSTATE_FIELD(x87.FCW);
14545 CHECK_XSTATE_FIELD(x87.FSW);
14546 CHECK_XSTATE_FIELD(x87.FTW);
14547 CHECK_XSTATE_FIELD(x87.FOP);
14548 CHECK_XSTATE_FIELD(x87.FPUIP);
14549 CHECK_XSTATE_FIELD(x87.CS);
14550 CHECK_XSTATE_FIELD(x87.Rsrvd1);
14551 CHECK_XSTATE_FIELD(x87.FPUDP);
14552 CHECK_XSTATE_FIELD(x87.DS);
14553 CHECK_XSTATE_FIELD(x87.Rsrvd2);
14554 CHECK_XSTATE_FIELD(x87.MXCSR);
14555 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
14556 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
14557 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
14558 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
14559 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
14560 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
14561 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
14562 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
14563 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
14564 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
14565 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
14566 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
14567 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
14568 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
14569 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
14570 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
14571 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
14572 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
14573 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
14574 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
14575 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
14576 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
14577 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
14578 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
14579 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
14580 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
14581 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
14582 }
14583 CHECK_FIELD(rip);
14584 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
14585 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
14586 {
14587 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
14588 CHECK_BIT_FIELD(rflags.Bits.u1CF);
14589 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
14590 CHECK_BIT_FIELD(rflags.Bits.u1PF);
14591 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
14592 CHECK_BIT_FIELD(rflags.Bits.u1AF);
14593 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
14594 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
14595 CHECK_BIT_FIELD(rflags.Bits.u1SF);
14596 CHECK_BIT_FIELD(rflags.Bits.u1TF);
14597 CHECK_BIT_FIELD(rflags.Bits.u1IF);
14598 CHECK_BIT_FIELD(rflags.Bits.u1DF);
14599 CHECK_BIT_FIELD(rflags.Bits.u1OF);
14600 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
14601 CHECK_BIT_FIELD(rflags.Bits.u1NT);
14602 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
14603 if (0 && !fRem) /** @todo debug the occational clear RF flags when running against VT-x. */
14604 CHECK_BIT_FIELD(rflags.Bits.u1RF);
14605 CHECK_BIT_FIELD(rflags.Bits.u1VM);
14606 CHECK_BIT_FIELD(rflags.Bits.u1AC);
14607 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
14608 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
14609 CHECK_BIT_FIELD(rflags.Bits.u1ID);
14610 }
14611
14612 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
14613 CHECK_FIELD(rax);
14614 CHECK_FIELD(rcx);
14615 if (!pVCpu->iem.s.fIgnoreRaxRdx)
14616 CHECK_FIELD(rdx);
14617 CHECK_FIELD(rbx);
14618 CHECK_FIELD(rsp);
14619 CHECK_FIELD(rbp);
14620 CHECK_FIELD(rsi);
14621 CHECK_FIELD(rdi);
14622 CHECK_FIELD(r8);
14623 CHECK_FIELD(r9);
14624 CHECK_FIELD(r10);
14625 CHECK_FIELD(r11);
14626 CHECK_FIELD(r12);
14627 CHECK_FIELD(r13);
14628 CHECK_SEL(cs);
14629 CHECK_SEL(ss);
14630 CHECK_SEL(ds);
14631 CHECK_SEL(es);
14632 CHECK_SEL(fs);
14633 CHECK_SEL(gs);
14634 CHECK_FIELD(cr0);
14635
14636    /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
14637       the faulting instruction first: 001b:77f61ff3 66 8b 42 02   mov ax, word [edx+002h] (NT4SP1) */
14638    /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
14639       while REM reports the address of the first byte on the page.  Pending investigation as to which is correct. */
14640 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14641 {
14642 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14643 { /* ignore */ }
14644 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14645 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14646 && fRem)
14647 { /* ignore */ }
14648 else
14649 CHECK_FIELD(cr2);
14650 }
14651 CHECK_FIELD(cr3);
14652 CHECK_FIELD(cr4);
14653 CHECK_FIELD(dr[0]);
14654 CHECK_FIELD(dr[1]);
14655 CHECK_FIELD(dr[2]);
14656 CHECK_FIELD(dr[3]);
14657 CHECK_FIELD(dr[6]);
14658 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14659 CHECK_FIELD(dr[7]);
14660 CHECK_FIELD(gdtr.cbGdt);
14661 CHECK_FIELD(gdtr.pGdt);
14662 CHECK_FIELD(idtr.cbIdt);
14663 CHECK_FIELD(idtr.pIdt);
14664 CHECK_SEL(ldtr);
14665 CHECK_SEL(tr);
14666 CHECK_FIELD(SysEnter.cs);
14667 CHECK_FIELD(SysEnter.eip);
14668 CHECK_FIELD(SysEnter.esp);
14669 CHECK_FIELD(msrEFER);
14670 CHECK_FIELD(msrSTAR);
14671 CHECK_FIELD(msrPAT);
14672 CHECK_FIELD(msrLSTAR);
14673 CHECK_FIELD(msrCSTAR);
14674 CHECK_FIELD(msrSFMASK);
14675 CHECK_FIELD(msrKERNELGSBASE);
14676
14677 if (cDiffs != 0)
14678 {
14679 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14680 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14681 RTAssertPanic();
14682 static bool volatile s_fEnterDebugger = true;
14683 if (s_fEnterDebugger)
14684 DBGFSTOP(pVM);
14685
14686# if 1 /* Ignore unimplemented instructions for now. */
14687 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14688 rcStrictIem = VINF_SUCCESS;
14689# endif
14690 }
14691# undef CHECK_FIELD
14692# undef CHECK_BIT_FIELD
14693 }
14694
14695 /*
14696 * If the register state compared fine, check the verification event
14697 * records.
14698 */
14699 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14700 {
14701 /*
14702          * Compare verification event records.
14703 * - I/O port accesses should be a 1:1 match.
14704 */
14705 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14706 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14707 while (pIemRec && pOtherRec)
14708 {
14709            /* Since we might miss RAM writes and reads, ignore reads and just
14710               verify that any extra IEM write records match what is actually in memory. */
14711 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14712 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14713 && pIemRec->pNext)
14714 {
14715 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14716 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14717 pIemRec = pIemRec->pNext;
14718 }
14719
14720 /* Do the compare. */
14721 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14722 {
14723 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14724 break;
14725 }
14726 bool fEquals;
14727 switch (pIemRec->enmEvent)
14728 {
14729 case IEMVERIFYEVENT_IOPORT_READ:
14730 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14731 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14732 break;
14733 case IEMVERIFYEVENT_IOPORT_WRITE:
14734 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14735 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14736 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14737 break;
14738 case IEMVERIFYEVENT_IOPORT_STR_READ:
14739 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14740 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14741 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14742 break;
14743 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14744 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14745 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14746 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14747 break;
14748 case IEMVERIFYEVENT_RAM_READ:
14749 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14750 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14751 break;
14752 case IEMVERIFYEVENT_RAM_WRITE:
14753 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14754 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14755 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14756 break;
14757 default:
14758 fEquals = false;
14759 break;
14760 }
14761 if (!fEquals)
14762 {
14763 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14764 break;
14765 }
14766
14767 /* advance */
14768 pIemRec = pIemRec->pNext;
14769 pOtherRec = pOtherRec->pNext;
14770 }
14771
14772 /* Ignore extra writes and reads. */
14773 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14774 {
14775 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14776 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14777 pIemRec = pIemRec->pNext;
14778 }
14779 if (pIemRec != NULL)
14780 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14781 else if (pOtherRec != NULL)
14782 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14783 }
14784 IEM_GET_CTX(pVCpu) = pOrgCtx;
14785
14786 return rcStrictIem;
14787}
14788
14789#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14790
14791/* stubs */
14792IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14793{
14794 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14795 return VERR_INTERNAL_ERROR;
14796}
14797
14798IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14799{
14800 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14801 return VERR_INTERNAL_ERROR;
14802}
14803
14804#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14805
14806
14807#ifdef LOG_ENABLED
14808/**
14809 * Logs the current instruction.
14810 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14811 * @param pCtx The current CPU context.
14812 * @param fSameCtx Set if we have the same context information as the VMM,
14813 * clear if we may have already executed an instruction in
14814 * our debug context. When clear, we assume IEMCPU holds
14815 * valid CPU mode info.
14816 */
14817IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14818{
14819# ifdef IN_RING3
14820 if (LogIs2Enabled())
14821 {
14822 char szInstr[256];
14823 uint32_t cbInstr = 0;
14824 if (fSameCtx)
14825 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14826 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14827 szInstr, sizeof(szInstr), &cbInstr);
14828 else
14829 {
14830 uint32_t fFlags = 0;
14831 switch (pVCpu->iem.s.enmCpuMode)
14832 {
14833 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14834 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14835 case IEMMODE_16BIT:
14836 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14837 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14838 else
14839 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14840 break;
14841 }
14842 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14843 szInstr, sizeof(szInstr), &cbInstr);
14844 }
14845
14846 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14847 Log2(("****\n"
14848 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14849 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14850 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14851 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14852 " %s\n"
14853 ,
14854 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14855 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14856 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14857 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14858 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14859 szInstr));
14860
14861 if (LogIs3Enabled())
14862 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14863 }
14864 else
14865# endif
14866 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14867 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14868 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14869}
14870#endif
14871
14872
14873/**
14874 * Makes status code adjustments (pass up from I/O and access handlers)
14875 * as well as maintaining statistics.
14876 *
14877 * @returns Strict VBox status code to pass up.
14878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14879 * @param rcStrict The status from executing an instruction.
14880 */
14881DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14882{
14883 if (rcStrict != VINF_SUCCESS)
14884 {
14885 if (RT_SUCCESS(rcStrict))
14886 {
14887 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14888 || rcStrict == VINF_IOM_R3_IOPORT_READ
14889 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14890 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14891 || rcStrict == VINF_IOM_R3_MMIO_READ
14892 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14893 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14894 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14895 || rcStrict == VINF_CPUM_R3_MSR_READ
14896 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14897 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14898 || rcStrict == VINF_EM_RAW_TO_R3
14899 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14900 || rcStrict == VINF_EM_TRIPLE_FAULT
14901 /* raw-mode / virt handlers only: */
14902 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14903 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14904 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14905 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14906 || rcStrict == VINF_SELM_SYNC_GDT
14907 || rcStrict == VINF_CSAM_PENDING_ACTION
14908 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14909 /* nested hw.virt codes: */
14910 || rcStrict == VINF_SVM_VMEXIT
14911 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14912/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
14913 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
14914#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14915 if ( rcStrict == VINF_SVM_VMEXIT
14916 && rcPassUp == VINF_SUCCESS)
14917 rcStrict = VINF_SUCCESS;
14918 else
14919#endif
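            /* Note: within the VINF_EM_FIRST..VINF_EM_LAST range a lower status
               value generally means higher priority, which is what the ordering
               check below relies on when deciding whether rcPassUp wins. */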
14920 if (rcPassUp == VINF_SUCCESS)
14921 pVCpu->iem.s.cRetInfStatuses++;
14922 else if ( rcPassUp < VINF_EM_FIRST
14923 || rcPassUp > VINF_EM_LAST
14924 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14925 {
14926 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14927 pVCpu->iem.s.cRetPassUpStatus++;
14928 rcStrict = rcPassUp;
14929 }
14930 else
14931 {
14932 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14933 pVCpu->iem.s.cRetInfStatuses++;
14934 }
14935 }
14936 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14937 pVCpu->iem.s.cRetAspectNotImplemented++;
14938 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14939 pVCpu->iem.s.cRetInstrNotImplemented++;
14940#ifdef IEM_VERIFICATION_MODE_FULL
14941 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14942 rcStrict = VINF_SUCCESS;
14943#endif
14944 else
14945 pVCpu->iem.s.cRetErrStatuses++;
14946 }
14947 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14948 {
14949 pVCpu->iem.s.cRetPassUpStatus++;
14950 rcStrict = pVCpu->iem.s.rcPassUp;
14951 }
14952
14953 return rcStrict;
14954}
14955
14956
14957/**
14958 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14959 * IEMExecOneWithPrefetchedByPC.
14960 *
14961 * Similar code is found in IEMExecLots.
14962 *
14963 * @return Strict VBox status code.
14964 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14966 * @param fExecuteInhibit If set, execute the instruction following CLI,
14967 * POP SS and MOV SS,GR.
14968 */
14969DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14970{
14971 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14972 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14973 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14974
14975#ifdef IEM_WITH_SETJMP
14976 VBOXSTRICTRC rcStrict;
14977 jmp_buf JmpBuf;
14978 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14979 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14980 if ((rcStrict = setjmp(JmpBuf)) == 0)
14981 {
14982 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14983 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14984 }
14985 else
14986 pVCpu->iem.s.cLongJumps++;
14987 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14988#else
14989 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14990 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14991#endif
14992 if (rcStrict == VINF_SUCCESS)
14993 pVCpu->iem.s.cInstructions++;
14994 if (pVCpu->iem.s.cActiveMappings > 0)
14995 {
14996 Assert(rcStrict != VINF_SUCCESS);
14997 iemMemRollback(pVCpu);
14998 }
14999 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
15000 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
15001 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
15002
15003//#ifdef DEBUG
15004// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
15005//#endif
15006
15007 /* Execute the next instruction as well if a cli, pop ss or
15008 mov ss, Gr has just completed successfully. */
15009 if ( fExecuteInhibit
15010 && rcStrict == VINF_SUCCESS
15011 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
15012 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
15013 {
15014 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
15015 if (rcStrict == VINF_SUCCESS)
15016 {
15017#ifdef LOG_ENABLED
15018 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
15019#endif
15020#ifdef IEM_WITH_SETJMP
15021 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15022 if ((rcStrict = setjmp(JmpBuf)) == 0)
15023 {
15024 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15025 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15026 }
15027 else
15028 pVCpu->iem.s.cLongJumps++;
15029 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15030#else
15031 IEM_OPCODE_GET_NEXT_U8(&b);
15032 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15033#endif
15034 if (rcStrict == VINF_SUCCESS)
15035 pVCpu->iem.s.cInstructions++;
15036 if (pVCpu->iem.s.cActiveMappings > 0)
15037 {
15038 Assert(rcStrict != VINF_SUCCESS);
15039 iemMemRollback(pVCpu);
15040 }
15041 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
15042 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
15043 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
15044 }
15045 else if (pVCpu->iem.s.cActiveMappings > 0)
15046 iemMemRollback(pVCpu);
15047        EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111)); /* Effectively clears the inhibition; RIP will never match this bogus value. */
15048 }
15049
15050 /*
15051 * Return value fiddling, statistics and sanity assertions.
15052 */
15053 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15054
15055 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15056 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15057#if defined(IEM_VERIFICATION_MODE_FULL)
15058 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15059 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15060 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15061 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15062#endif
15063 return rcStrict;
15064}
15065
15066
15067#ifdef IN_RC
15068/**
15069 * Re-enters raw-mode or ensure we return to ring-3.
15070 *
15071 * @returns rcStrict, maybe modified.
15072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15073 * @param pCtx The current CPU context.
15074 * @param rcStrict The status code returned by the interpreter.
15075 */
15076DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
15077{
15078 if ( !pVCpu->iem.s.fInPatchCode
15079 && ( rcStrict == VINF_SUCCESS
15080 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
15081 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
15082 {
15083 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
15084 CPUMRawEnter(pVCpu);
15085 else
15086 {
15087 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
15088 rcStrict = VINF_EM_RESCHEDULE;
15089 }
15090 }
15091 return rcStrict;
15092}
15093#endif
15094
15095
15096/**
15097 * Execute one instruction.
15098 *
15099 * @return Strict VBox status code.
15100 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15101 */
15102VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
15103{
15104#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15105 if (++pVCpu->iem.s.cVerifyDepth == 1)
15106 iemExecVerificationModeSetup(pVCpu);
15107#endif
15108#ifdef LOG_ENABLED
15109 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15110 iemLogCurInstr(pVCpu, pCtx, true);
15111#endif
15112
15113 /*
15114 * Do the decoding and emulation.
15115 */
15116 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15117 if (rcStrict == VINF_SUCCESS)
15118 rcStrict = iemExecOneInner(pVCpu, true);
15119 else if (pVCpu->iem.s.cActiveMappings > 0)
15120 iemMemRollback(pVCpu);
15121
15122#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15123 /*
15124 * Assert some sanity.
15125 */
15126 if (pVCpu->iem.s.cVerifyDepth == 1)
15127 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15128 pVCpu->iem.s.cVerifyDepth--;
15129#endif
15130#ifdef IN_RC
15131 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15132#endif
15133 if (rcStrict != VINF_SUCCESS)
15134 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15135 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15136 return rcStrict;
15137}
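/* A minimal usage sketch (disabled, editor's illustration): roughly how a caller
   such as EM might use IEMExecOne to interpret a single guest instruction. The
   wrapper name below is hypothetical and not part of any VBox interface. */
#if 0
static VBOXSTRICTRC emR3ExampleInterpretOne(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);  /* decodes, executes and advances the guest state */
    if (rcStrict == VINF_SUCCESS)
        return VINF_SUCCESS;                    /* the instruction retired normally */
    return rcStrict;                            /* informational / error statuses go to the caller as-is */
}
#endif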
15138
15139
15140VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15141{
15142 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15143 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15144
15145 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15146 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15147 if (rcStrict == VINF_SUCCESS)
15148 {
15149 rcStrict = iemExecOneInner(pVCpu, true);
15150 if (pcbWritten)
15151 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15152 }
15153 else if (pVCpu->iem.s.cActiveMappings > 0)
15154 iemMemRollback(pVCpu);
15155
15156#ifdef IN_RC
15157 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15158#endif
15159 return rcStrict;
15160}
15161
15162
15163VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15164 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15165{
15166 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15167 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15168
15169 VBOXSTRICTRC rcStrict;
15170 if ( cbOpcodeBytes
15171 && pCtx->rip == OpcodeBytesPC)
15172 {
15173 iemInitDecoder(pVCpu, false);
15174#ifdef IEM_WITH_CODE_TLB
15175 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15176 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15177 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15178 pVCpu->iem.s.offCurInstrStart = 0;
15179 pVCpu->iem.s.offInstrNextByte = 0;
15180#else
15181 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15182 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15183#endif
15184 rcStrict = VINF_SUCCESS;
15185 }
15186 else
15187 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15188 if (rcStrict == VINF_SUCCESS)
15189 rcStrict = iemExecOneInner(pVCpu, true);
15190 else if (pVCpu->iem.s.cActiveMappings > 0)
15191 iemMemRollback(pVCpu);
15192
15193#ifdef IN_RC
15194 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15195#endif
15196 return rcStrict;
15197}
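/* A minimal usage sketch (disabled, editor's illustration): feeding already
   fetched opcode bytes to IEMExecOneWithPrefetchedByPC. The prefetch fast path
   is only taken when the bytes were read at the current RIP; the wrapper below
   is hypothetical. */
#if 0
static VBOXSTRICTRC ExampleExecWithPrefetchedBytes(PVMCPU pVCpu, const uint8_t *pabOpcodes, size_t cbOpcodes)
{
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip, pabOpcodes, cbOpcodes);
}
#endif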
15198
15199
15200VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15201{
15202 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15203 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15204
15205 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15206 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15207 if (rcStrict == VINF_SUCCESS)
15208 {
15209 rcStrict = iemExecOneInner(pVCpu, false);
15210 if (pcbWritten)
15211 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15212 }
15213 else if (pVCpu->iem.s.cActiveMappings > 0)
15214 iemMemRollback(pVCpu);
15215
15216#ifdef IN_RC
15217 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15218#endif
15219 return rcStrict;
15220}
15221
15222
15223VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15224 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15225{
15226 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15227 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15228
15229 VBOXSTRICTRC rcStrict;
15230 if ( cbOpcodeBytes
15231 && pCtx->rip == OpcodeBytesPC)
15232 {
15233 iemInitDecoder(pVCpu, true);
15234#ifdef IEM_WITH_CODE_TLB
15235 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15236 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15237 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15238 pVCpu->iem.s.offCurInstrStart = 0;
15239 pVCpu->iem.s.offInstrNextByte = 0;
15240#else
15241 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15242 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15243#endif
15244 rcStrict = VINF_SUCCESS;
15245 }
15246 else
15247 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15248 if (rcStrict == VINF_SUCCESS)
15249 rcStrict = iemExecOneInner(pVCpu, false);
15250 else if (pVCpu->iem.s.cActiveMappings > 0)
15251 iemMemRollback(pVCpu);
15252
15253#ifdef IN_RC
15254 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15255#endif
15256 return rcStrict;
15257}
15258
15259
15260/**
15261 * For debugging DISGetParamSize; may come in handy.
15262 *
15263 * @returns Strict VBox status code.
15264 * @param pVCpu The cross context virtual CPU structure of the
15265 * calling EMT.
15266 * @param pCtxCore The context core structure.
15267 * @param OpcodeBytesPC The PC of the opcode bytes.
15268 * @param pvOpcodeBytes Prefetched opcode bytes.
15269 * @param cbOpcodeBytes Number of prefetched bytes.
15270 * @param pcbWritten Where to return the number of bytes written.
15271 * Optional.
15272 */
15273VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15274 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
15275 uint32_t *pcbWritten)
15276{
15277 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15278 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15279
15280 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15281 VBOXSTRICTRC rcStrict;
15282 if ( cbOpcodeBytes
15283 && pCtx->rip == OpcodeBytesPC)
15284 {
15285 iemInitDecoder(pVCpu, true);
15286#ifdef IEM_WITH_CODE_TLB
15287 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15288 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15289 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15290 pVCpu->iem.s.offCurInstrStart = 0;
15291 pVCpu->iem.s.offInstrNextByte = 0;
15292#else
15293 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15294 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15295#endif
15296 rcStrict = VINF_SUCCESS;
15297 }
15298 else
15299 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15300 if (rcStrict == VINF_SUCCESS)
15301 {
15302 rcStrict = iemExecOneInner(pVCpu, false);
15303 if (pcbWritten)
15304 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15305 }
15306 else if (pVCpu->iem.s.cActiveMappings > 0)
15307 iemMemRollback(pVCpu);
15308
15309#ifdef IN_RC
15310 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15311#endif
15312 return rcStrict;
15313}
15314
15315
15316VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
15317{
15318 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
15319
15320#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15321 /*
15322 * See if there is an interrupt pending in TRPM, inject it if we can.
15323 */
15324 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15325# ifdef IEM_VERIFICATION_MODE_FULL
15326 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15327# endif
15328
15329 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */
15330# if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
15331    bool fIntrEnabled = pCtx->hwvirt.fGif;
15332 if (fIntrEnabled)
15333 {
15334 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15335 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15336 else
15337 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15338 }
15339# else
15340 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15341# endif
15342 if ( fIntrEnabled
15343 && TRPMHasTrap(pVCpu)
15344 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15345 {
15346 uint8_t u8TrapNo;
15347 TRPMEVENT enmType;
15348 RTGCUINT uErrCode;
15349 RTGCPTR uCr2;
15350 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15351 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15352 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15353 TRPMResetTrap(pVCpu);
15354 }
15355
15356 /*
15357 * Log the state.
15358 */
15359# ifdef LOG_ENABLED
15360 iemLogCurInstr(pVCpu, pCtx, true);
15361# endif
15362
15363 /*
15364 * Do the decoding and emulation.
15365 */
15366 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15367 if (rcStrict == VINF_SUCCESS)
15368 rcStrict = iemExecOneInner(pVCpu, true);
15369 else if (pVCpu->iem.s.cActiveMappings > 0)
15370 iemMemRollback(pVCpu);
15371
15372 /*
15373 * Assert some sanity.
15374 */
15375 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15376
15377 /*
15378 * Log and return.
15379 */
15380 if (rcStrict != VINF_SUCCESS)
15381 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15382 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15383 if (pcInstructions)
15384 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15385 return rcStrict;
15386
15387#else /* Not verification mode */
15388
15389 /*
15390 * See if there is an interrupt pending in TRPM, inject it if we can.
15391 */
15392 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15393# ifdef IEM_VERIFICATION_MODE_FULL
15394 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15395# endif
15396
15397 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
15398# if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
15399 bool fIntrEnabled = pCtx->hwvirt.fGif;
15400 if (fIntrEnabled)
15401 {
15402 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
15403 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
15404 else
15405 fIntrEnabled = pCtx->eflags.Bits.u1IF;
15406 }
15407# else
15408 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
15409# endif
15410 if ( fIntrEnabled
15411 && TRPMHasTrap(pVCpu)
15412 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15413 {
15414 uint8_t u8TrapNo;
15415 TRPMEVENT enmType;
15416 RTGCUINT uErrCode;
15417 RTGCPTR uCr2;
15418 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15419 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15420 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15421 TRPMResetTrap(pVCpu);
15422 }
15423
15424 /*
15425 * Initial decoder init w/ prefetch, then setup setjmp.
15426 */
15427 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15428 if (rcStrict == VINF_SUCCESS)
15429 {
15430# ifdef IEM_WITH_SETJMP
15431 jmp_buf JmpBuf;
15432 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
15433 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15434 pVCpu->iem.s.cActiveMappings = 0;
15435 if ((rcStrict = setjmp(JmpBuf)) == 0)
15436# endif
15437 {
15438 /*
15439 * The run loop. We limit ourselves to 4096 instructions right now.
15440 */
15441 PVM pVM = pVCpu->CTX_SUFF(pVM);
15442 uint32_t cInstr = 4096;
15443 for (;;)
15444 {
15445 /*
15446 * Log the state.
15447 */
15448# ifdef LOG_ENABLED
15449 iemLogCurInstr(pVCpu, pCtx, true);
15450# endif
15451
15452 /*
15453 * Do the decoding and emulation.
15454 */
15455 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15456 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15457 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15458 {
15459 Assert(pVCpu->iem.s.cActiveMappings == 0);
15460 pVCpu->iem.s.cInstructions++;
15461 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
15462 {
15463 uint32_t fCpu = pVCpu->fLocalForcedActions
15464 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
15465 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
15466 | VMCPU_FF_TLB_FLUSH
15467# ifdef VBOX_WITH_RAW_MODE
15468 | VMCPU_FF_TRPM_SYNC_IDT
15469 | VMCPU_FF_SELM_SYNC_TSS
15470 | VMCPU_FF_SELM_SYNC_GDT
15471 | VMCPU_FF_SELM_SYNC_LDT
15472# endif
15473 | VMCPU_FF_INHIBIT_INTERRUPTS
15474 | VMCPU_FF_BLOCK_NMIS
15475 | VMCPU_FF_UNHALT ));
15476
15477 if (RT_LIKELY( ( !fCpu
15478 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
15479 && !pCtx->rflags.Bits.u1IF) )
15480 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
15481 {
15482 if (cInstr-- > 0)
15483 {
15484 Assert(pVCpu->iem.s.cActiveMappings == 0);
15485 iemReInitDecoder(pVCpu);
15486 continue;
15487 }
15488 }
15489 }
15490 Assert(pVCpu->iem.s.cActiveMappings == 0);
15491 }
15492 else if (pVCpu->iem.s.cActiveMappings > 0)
15493 iemMemRollback(pVCpu);
15494 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15495 break;
15496 }
15497 }
15498# ifdef IEM_WITH_SETJMP
15499 else
15500 {
15501 if (pVCpu->iem.s.cActiveMappings > 0)
15502 iemMemRollback(pVCpu);
15503 pVCpu->iem.s.cLongJumps++;
15504 }
15505 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15506# endif
15507
15508 /*
15509 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15510 */
15511 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15512 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15513# if defined(IEM_VERIFICATION_MODE_FULL)
15514 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15515 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15516 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15517 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15518# endif
15519 }
15520 else
15521 {
15522 if (pVCpu->iem.s.cActiveMappings > 0)
15523 iemMemRollback(pVCpu);
15524
15525# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15526 /*
15527 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
15528 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
15529 */
15530 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15531# endif
15532 }
15533
15534 /*
15535 * Maybe re-enter raw-mode and log.
15536 */
15537# ifdef IN_RC
15538 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15539# endif
15540 if (rcStrict != VINF_SUCCESS)
15541 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15542 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15543 if (pcInstructions)
15544 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15545 return rcStrict;
15546#endif /* Not verification mode */
15547}
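/* A minimal usage sketch (disabled, editor's illustration): running a batch of
   instructions via IEMExecLots and reporting how many actually retired. The
   wrapper is hypothetical. */
#if 0
static VBOXSTRICTRC ExampleRunBatch(PVMCPU pVCpu)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict      = IEMExecLots(pVCpu, &cInstructions);
    Log(("ExampleRunBatch: executed %u instructions, rcStrict=%Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif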
15548
15549
15550
15551/**
15552 * Injects a trap, fault, abort, software interrupt or external interrupt.
15553 *
15554 * The parameter list matches TRPMQueryTrapAll pretty closely.
15555 *
15556 * @returns Strict VBox status code.
15557 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15558 * @param u8TrapNo The trap number.
15559 * @param enmType What type is it (trap/fault/abort), software
15560 * interrupt or hardware interrupt.
15561 * @param uErrCode The error code if applicable.
15562 * @param uCr2 The CR2 value if applicable.
15563 * @param cbInstr The instruction length (only relevant for
15564 * software interrupts).
15565 */
15566VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
15567 uint8_t cbInstr)
15568{
15569 iemInitDecoder(pVCpu, false);
15570#ifdef DBGFTRACE_ENABLED
15571 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
15572 u8TrapNo, enmType, uErrCode, uCr2);
15573#endif
15574
15575 uint32_t fFlags;
15576 switch (enmType)
15577 {
15578 case TRPM_HARDWARE_INT:
15579 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
15580 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
15581 uErrCode = uCr2 = 0;
15582 break;
15583
15584 case TRPM_SOFTWARE_INT:
15585 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
15586 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
15587 uErrCode = uCr2 = 0;
15588 break;
15589
15590 case TRPM_TRAP:
15591 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
15592 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
15593 if (u8TrapNo == X86_XCPT_PF)
15594 fFlags |= IEM_XCPT_FLAGS_CR2;
15595 switch (u8TrapNo)
15596 {
15597 case X86_XCPT_DF:
15598 case X86_XCPT_TS:
15599 case X86_XCPT_NP:
15600 case X86_XCPT_SS:
15601 case X86_XCPT_PF:
15602 case X86_XCPT_AC:
15603 fFlags |= IEM_XCPT_FLAGS_ERR;
15604 break;
15605
15606 case X86_XCPT_NMI:
15607 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
15608 break;
15609 }
15610 break;
15611
15612 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15613 }
15614
15615 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15616
15617 if (pVCpu->iem.s.cActiveMappings > 0)
15618 iemMemRollback(pVCpu);
15619 return rcStrict;
15620}
15621
15622
15623/**
15624 * Injects the active TRPM event.
15625 *
15626 * @returns Strict VBox status code.
15627 * @param pVCpu The cross context virtual CPU structure.
15628 */
15629VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
15630{
15631#ifndef IEM_IMPLEMENTS_TASKSWITCH
15632 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15633#else
15634 uint8_t u8TrapNo;
15635 TRPMEVENT enmType;
15636 RTGCUINT uErrCode;
15637 RTGCUINTPTR uCr2;
15638 uint8_t cbInstr;
15639 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
15640 if (RT_FAILURE(rc))
15641 return rc;
15642
15643 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15644
15645#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15646 if (rcStrict == VINF_SVM_VMEXIT)
15647 rcStrict = VINF_SUCCESS;
15648#endif
15649
15650 /** @todo Are there any other codes that imply the event was successfully
15651 * delivered to the guest? See @bugref{6607}. */
15652 if ( rcStrict == VINF_SUCCESS
15653 || rcStrict == VINF_IEM_RAISED_XCPT)
15654 {
15655 TRPMResetTrap(pVCpu);
15656 }
15657 return rcStrict;
15658#endif
15659}
15660
15661
15662VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15663{
15664 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15665 return VERR_NOT_IMPLEMENTED;
15666}
15667
15668
15669VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15670{
15671 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15672 return VERR_NOT_IMPLEMENTED;
15673}
15674
15675
15676#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15677/**
15678 * Executes an IRET instruction with default operand size.
15679 *
15680 * This is for PATM.
15681 *
15682 * @returns VBox status code.
15683 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15684 * @param pCtxCore The register frame.
15685 */
15686VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
15687{
15688 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15689
15690 iemCtxCoreToCtx(pCtx, pCtxCore);
15691 iemInitDecoder(pVCpu);
15692 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15693 if (rcStrict == VINF_SUCCESS)
15694 iemCtxToCtxCore(pCtxCore, pCtx);
15695 else
15696 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15697 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15698 return rcStrict;
15699}
15700#endif
15701
15702
15703/**
15704 * Macro used by the IEMExec* method to check the given instruction length.
15705 *
15706 * Will return on failure!
15707 *
15708 * @param a_cbInstr The given instruction length.
15709 * @param a_cbMin The minimum length.
15710 */
15711#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15712 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15713 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
15714
15715
15716/**
15717 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15718 *
15719 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15720 *
15721 * @returns Fiddled strict VBox status code, ready to return to non-IEM caller.
15722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15723 * @param rcStrict The status code to fiddle.
15724 */
15725DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15726{
15727 iemUninitExec(pVCpu);
15728#ifdef IN_RC
15729 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15730 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15731#else
15732 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15733#endif
15734}
15735
15736
15737/**
15738 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15739 *
15740 * This API ASSUMES that the caller has already verified that the guest code is
15741 * allowed to access the I/O port. (The I/O port is in the DX register in the
15742 * guest state.)
15743 *
15744 * @returns Strict VBox status code.
15745 * @param pVCpu The cross context virtual CPU structure.
15746 * @param cbValue The size of the I/O port access (1, 2, or 4).
15747 * @param enmAddrMode The addressing mode.
15748 * @param fRepPrefix Indicates whether a repeat prefix is used
15749 * (doesn't matter which for this instruction).
15750 * @param cbInstr The instruction length in bytes.
15751 * @param iEffSeg The effective segment register (X86_SREG_XXX index).
15752 * @param fIoChecked Whether the access to the I/O port has been
15753 * checked or not. It's typically checked in the
15754 * HM scenario.
15755 */
15756VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15757 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15758{
15759 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15760 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15761
15762 /*
15763 * State init.
15764 */
15765 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15766
15767 /*
15768 * Switch orgy for getting to the right handler.
15769 */
15770 VBOXSTRICTRC rcStrict;
15771 if (fRepPrefix)
15772 {
15773 switch (enmAddrMode)
15774 {
15775 case IEMMODE_16BIT:
15776 switch (cbValue)
15777 {
15778 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15779 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15780 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15781 default:
15782 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15783 }
15784 break;
15785
15786 case IEMMODE_32BIT:
15787 switch (cbValue)
15788 {
15789 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15790 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15791 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15792 default:
15793 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15794 }
15795 break;
15796
15797 case IEMMODE_64BIT:
15798 switch (cbValue)
15799 {
15800 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15801 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15802 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15803 default:
15804 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15805 }
15806 break;
15807
15808 default:
15809 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15810 }
15811 }
15812 else
15813 {
15814 switch (enmAddrMode)
15815 {
15816 case IEMMODE_16BIT:
15817 switch (cbValue)
15818 {
15819 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15820 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15821 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15822 default:
15823 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15824 }
15825 break;
15826
15827 case IEMMODE_32BIT:
15828 switch (cbValue)
15829 {
15830 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15831 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15832 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15833 default:
15834 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15835 }
15836 break;
15837
15838 case IEMMODE_64BIT:
15839 switch (cbValue)
15840 {
15841 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15842 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15843 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15844 default:
15845 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15846 }
15847 break;
15848
15849 default:
15850 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15851 }
15852 }
15853
15854 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15855}
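/* A minimal usage sketch (disabled, editor's illustration): how an HM exit
   handler might forward an intercepted 'rep outsb' to IEMExecStringIoWrite.
   The handler shape and the parameter values (32-bit address size, DS segment,
   pre-checked I/O port) are illustrative assumptions only. */
#if 0
static VBOXSTRICTRC ExampleHandleOutsExit(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS, true /*fIoChecked*/);
}
#endif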
15856
15857
15858/**
15859 * Interface for HM and EM for executing string I/O IN (read) instructions.
15860 *
15861 * This API ASSUMES that the caller has already verified that the guest code is
15862 * allowed to access the I/O port. (The I/O port is in the DX register in the
15863 * guest state.)
15864 *
15865 * @returns Strict VBox status code.
15866 * @param pVCpu The cross context virtual CPU structure.
15867 * @param cbValue The size of the I/O port access (1, 2, or 4).
15868 * @param enmAddrMode The addressing mode.
15869 * @param fRepPrefix Indicates whether a repeat prefix is used
15870 * (doesn't matter which for this instruction).
15871 * @param cbInstr The instruction length in bytes.
15872 * @param fIoChecked Whether the access to the I/O port has been
15873 * checked or not. It's typically checked in the
15874 * HM scenario.
15875 */
15876VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15877 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15878{
15879 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15880
15881 /*
15882 * State init.
15883 */
15884 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15885
15886 /*
15887 * Switch orgy for getting to the right handler.
15888 */
15889 VBOXSTRICTRC rcStrict;
15890 if (fRepPrefix)
15891 {
15892 switch (enmAddrMode)
15893 {
15894 case IEMMODE_16BIT:
15895 switch (cbValue)
15896 {
15897 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15898 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15899 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15900 default:
15901 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15902 }
15903 break;
15904
15905 case IEMMODE_32BIT:
15906 switch (cbValue)
15907 {
15908 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15909 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15910 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15911 default:
15912 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15913 }
15914 break;
15915
15916 case IEMMODE_64BIT:
15917 switch (cbValue)
15918 {
15919 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15920 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15921 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15922 default:
15923 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15924 }
15925 break;
15926
15927 default:
15928 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15929 }
15930 }
15931 else
15932 {
15933 switch (enmAddrMode)
15934 {
15935 case IEMMODE_16BIT:
15936 switch (cbValue)
15937 {
15938 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15939 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15940 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15941 default:
15942 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15943 }
15944 break;
15945
15946 case IEMMODE_32BIT:
15947 switch (cbValue)
15948 {
15949 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15950 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15951 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15952 default:
15953 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15954 }
15955 break;
15956
15957 case IEMMODE_64BIT:
15958 switch (cbValue)
15959 {
15960 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15961 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15962 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15963 default:
15964 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15965 }
15966 break;
15967
15968 default:
15969 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15970 }
15971 }
15972
15973 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15974}
15975
15976
15977/**
15978 * Interface for raw-mode to execute an OUT instruction.
15979 *
15980 * @returns Strict VBox status code.
15981 * @param pVCpu The cross context virtual CPU structure.
15982 * @param cbInstr The instruction length in bytes.
15983 * @param u16Port The port to write to.
15984 * @param cbReg The register size.
15985 *
15986 * @remarks In ring-0 not all of the state needs to be synced in.
15987 */
15988VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15989{
15990 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15991 Assert(cbReg <= 4 && cbReg != 3);
15992
15993 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15994 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15995 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15996}
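/* A minimal usage sketch (disabled, editor's illustration): emulating an
   intercepted single-byte OUT via IEMExecDecodedOut. The port number and
   operand size are illustrative only. */
#if 0
static VBOXSTRICTRC ExampleHandleOutExit(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecDecodedOut(pVCpu, cbInstr, 0x80 /*u16Port*/, 1 /*cbReg*/);
}
#endif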
15997
15998
15999/**
16000 * Interface for raw-mode to execute an IN instruction.
16001 *
16002 * @returns Strict VBox status code.
16003 * @param pVCpu The cross context virtual CPU structure.
16004 * @param cbInstr The instruction length in bytes.
16005 * @param u16Port The port to read.
16006 * @param cbReg The register size.
16007 */
16008VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
16009{
16010 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
16011 Assert(cbReg <= 4 && cbReg != 3);
16012
16013 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16014 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
16015 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16016}
16017
16018
16019/**
16020 * Interface for HM and EM to write to a CRx register.
16021 *
16022 * @returns Strict VBox status code.
16023 * @param pVCpu The cross context virtual CPU structure.
16024 * @param cbInstr The instruction length in bytes.
16025 * @param iCrReg The control register number (destination).
16026 * @param iGReg The general purpose register number (source).
16027 *
16028 * @remarks In ring-0 not all of the state needs to be synced in.
16029 */
16030VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
16031{
16032 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
16033 Assert(iCrReg < 16);
16034 Assert(iGReg < 16);
16035
16036 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16037 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
16038 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16039}
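/* A minimal usage sketch (disabled, editor's illustration): emulating an
   intercepted 'mov cr3, <gpr>' via IEMExecDecodedMovCRxWrite, e.g. from an HM
   exit handler. The wrapper and the choice of CR3 are illustrative only. */
#if 0
static VBOXSTRICTRC ExampleHandleMovToCr3Exit(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3 /*iCrReg*/, iGReg);
}
#endif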
16040
16041
16042/**
16043 * Interface for HM and EM to read from a CRx register.
16044 *
16045 * @returns Strict VBox status code.
16046 * @param pVCpu The cross context virtual CPU structure.
16047 * @param cbInstr The instruction length in bytes.
16048 * @param iGReg The general purpose register number (destination).
16049 * @param iCrReg The control register number (source).
16050 *
16051 * @remarks In ring-0 not all of the state needs to be synced in.
16052 */
16053VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
16054{
16055 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
16056 Assert(iCrReg < 16);
16057 Assert(iGReg < 16);
16058
16059 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16060 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
16061 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16062}
16063
16064
16065/**
16066 * Interface for HM and EM to clear the CR0[TS] bit.
16067 *
16068 * @returns Strict VBox status code.
16069 * @param pVCpu The cross context virtual CPU structure.
16070 * @param cbInstr The instruction length in bytes.
16071 *
16072 * @remarks In ring-0 not all of the state needs to be synced in.
16073 */
16074VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
16075{
16076 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
16077
16078 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16079 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
16080 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16081}
16082
16083
16084/**
16085 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
16086 *
16087 * @returns Strict VBox status code.
16088 * @param pVCpu The cross context virtual CPU structure.
16089 * @param cbInstr The instruction length in bytes.
16090 * @param uValue The value to load into CR0.
16091 *
16092 * @remarks In ring-0 not all of the state needs to be synced in.
16093 */
16094VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
16095{
16096 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16097
16098 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16099 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
16100 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16101}
16102
16103
16104/**
16105 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
16106 *
16107 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
16108 *
16109 * @returns Strict VBox status code.
16110 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16111 * @param cbInstr The instruction length in bytes.
16112 * @remarks In ring-0 not all of the state needs to be synced in.
16113 * @thread EMT(pVCpu)
16114 */
16115VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
16116{
16117 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16118
16119 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16120 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
16121 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16122}
16123
16124
16125/**
16126 * Interface for HM and EM to emulate the INVLPG instruction.
16127 *
16128 * @param pVCpu The cross context virtual CPU structure.
16129 * @param cbInstr The instruction length in bytes.
16130 * @param GCPtrPage The effective address of the page to invalidate.
16131 *
16132 * @remarks In ring-0 not all of the state needs to be synced in.
16133 */
16134VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
16135{
16136 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16137
16138 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16139 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
16140 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16141}
16142
16143
16144/**
16145 * Interface for HM and EM to emulate the INVPCID instruction.
16146 *
16147 * @param pVCpu The cross context virtual CPU structure.
16148 * @param cbInstr The instruction length in bytes.
16149 * @param uType The invalidation type.
16150 * @param GCPtrInvpcidDesc The effective address of the INVPCID descriptor.
16151 *
16152 * @remarks In ring-0 not all of the state needs to be synced in.
16153 */
16154VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t uType, RTGCPTR GCPtrInvpcidDesc)
16155{
16156 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
16157
16158 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16159 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_invpcid, uType, GCPtrInvpcidDesc);
16160 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16161}
16162
16163
16164/**
16165 * Checks if IEM is in the process of delivering an event (interrupt or
16166 * exception).
16167 *
16168 * @returns true if we're in the process of raising an interrupt or exception,
16169 * false otherwise.
16170 * @param pVCpu The cross context virtual CPU structure.
16171 * @param puVector Where to store the vector associated with the
16172 * currently delivered event, optional.
16173 * @param pfFlags Where to store the event delivery flags (see
16174 * IEM_XCPT_FLAGS_XXX), optional.
16175 * @param puErr Where to store the error code associated with the
16176 * event, optional.
16177 * @param puCr2 Where to store the CR2 associated with the event,
16178 * optional.
16179 * @remarks The caller should check the flags to determine if the error code and
16180 * CR2 are valid for the event.
16181 */
16182VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
16183{
16184 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
16185 if (fRaisingXcpt)
16186 {
16187 if (puVector)
16188 *puVector = pVCpu->iem.s.uCurXcpt;
16189 if (pfFlags)
16190 *pfFlags = pVCpu->iem.s.fCurXcpt;
16191 if (puErr)
16192 *puErr = pVCpu->iem.s.uCurXcptErr;
16193 if (puCr2)
16194 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
16195 }
16196 return fRaisingXcpt;
16197}
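/* A minimal usage sketch (disabled, editor's illustration): querying IEM for an
   event currently being delivered, for instance when deciding how to report a
   nested exception. Purely illustrative. */
#if 0
static void ExampleLogPendingEvent(PVMCPU pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags, uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("IEM is delivering vector %#x (flags=%#x err=%#x cr2=%RX64)\n", uVector, fFlags, uErr, uCr2));
}
#endif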
16198
16199#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
16200/**
16201 * Interface for HM and EM to emulate the CLGI instruction.
16202 *
16203 * @returns Strict VBox status code.
16204 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16205 * @param cbInstr The instruction length in bytes.
16206 * @thread EMT(pVCpu)
16207 */
16208VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
16209{
16210 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16211
16212 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16213 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
16214 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16215}
16216
16217
16218/**
16219 * Interface for HM and EM to emulate the STGI instruction.
16220 *
16221 * @returns Strict VBox status code.
16222 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16223 * @param cbInstr The instruction length in bytes.
16224 * @thread EMT(pVCpu)
16225 */
16226VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
16227{
16228 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16229
16230 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16231 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
16232 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16233}
16234
16235
16236/**
16237 * Interface for HM and EM to emulate the VMLOAD instruction.
16238 *
16239 * @returns Strict VBox status code.
16240 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16241 * @param cbInstr The instruction length in bytes.
16242 * @thread EMT(pVCpu)
16243 */
16244VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
16245{
16246 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16247
16248 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16249 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
16250 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16251}
16252
16253
16254/**
16255 * Interface for HM and EM to emulate the VMSAVE instruction.
16256 *
16257 * @returns Strict VBox status code.
16258 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16259 * @param cbInstr The instruction length in bytes.
16260 * @thread EMT(pVCpu)
16261 */
16262VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
16263{
16264 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16265
16266 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16267 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
16268 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16269}
16270
16271
16272/**
16273 * Interface for HM and EM to emulate the INVLPGA instruction.
16274 *
16275 * @returns Strict VBox status code.
16276 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16277 * @param cbInstr The instruction length in bytes.
16278 * @thread EMT(pVCpu)
16279 */
16280VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
16281{
16282 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16283
16284 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16285 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
16286 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16287}
16288
16289
16290/**
16291 * Interface for HM and EM to emulate the VMRUN instruction.
16292 *
16293 * @returns Strict VBox status code.
16294 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16295 * @param cbInstr The instruction length in bytes.
16296 * @thread EMT(pVCpu)
16297 */
16298VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
16299{
16300 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16301
16302 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16303 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
16304 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16305}
16306
16307
16308/**
16309 * Interface for HM and EM to emulate \#VMEXIT.
16310 *
16311 * @returns Strict VBox status code.
16312 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16313 * @param uExitCode The exit code.
16314 * @param uExitInfo1 The exit info. 1 field.
16315 * @param uExitInfo2 The exit info. 2 field.
16316 * @thread EMT(pVCpu)
16317 */
16318VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
16319{
16320 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, IEM_GET_CTX(pVCpu), uExitCode, uExitInfo1, uExitInfo2);
16321 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16322}
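/* A minimal usage sketch (disabled, editor's illustration): signalling a
   physical-interrupt \#VMEXIT to the nested hypervisor through IEMExecSvmVmexit.
   The exit code and the zero exit-info fields are illustrative assumptions. */
#if 0
static VBOXSTRICTRC ExampleTriggerIntrVmexit(PVMCPU pVCpu)
{
    return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0 /*uExitInfo1*/, 0 /*uExitInfo2*/);
}
#endif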
16323#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
16324
16325#ifdef IN_RING3
16326
16327/**
16328 * Handles the unlikely and probably fatal merge cases.
16329 *
16330 * @returns Merged status code.
16331 * @param rcStrict Current EM status code.
16332 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16333 * with @a rcStrict.
16334 * @param iMemMap The memory mapping index. For error reporting only.
16335 * @param pVCpu The cross context virtual CPU structure of the calling
16336 * thread, for error reporting only.
16337 */
16338DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16339 unsigned iMemMap, PVMCPU pVCpu)
16340{
16341 if (RT_FAILURE_NP(rcStrict))
16342 return rcStrict;
16343
16344 if (RT_FAILURE_NP(rcStrictCommit))
16345 return rcStrictCommit;
16346
16347 if (rcStrict == rcStrictCommit)
16348 return rcStrictCommit;
16349
16350 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16351 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16352 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16353 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16354 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16355 return VERR_IOM_FF_STATUS_IPE;
16356}
16357
16358
16359/**
16360 * Helper for IOMR3ProcessForceFlag.
16361 *
16362 * @returns Merged status code.
16363 * @param rcStrict Current EM status code.
16364 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16365 * with @a rcStrict.
16366 * @param iMemMap The memory mapping index. For error reporting only.
16367 * @param pVCpu The cross context virtual CPU structure of the calling
16368 * thread, for error reporting only.
16369 */
16370DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16371{
16372 /* Simple. */
16373 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16374 return rcStrictCommit;
16375
16376 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16377 return rcStrict;
16378
16379 /* EM scheduling status codes. */
16380 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16381 && rcStrict <= VINF_EM_LAST))
16382 {
16383 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16384 && rcStrictCommit <= VINF_EM_LAST))
16385 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16386 }
16387
16388 /* Unlikely */
16389 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16390}
16391
16392
16393/**
16394 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16395 *
16396 * @returns Merge between @a rcStrict and what the commit operation returned.
16397 * @param pVM The cross context VM structure.
16398 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16399 * @param rcStrict The status code returned by ring-0 or raw-mode.
16400 */
16401VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16402{
16403 /*
16404 * Reset the pending commit.
16405 */
16406 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16407 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16408 ("%#x %#x %#x\n",
16409 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16410 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16411
16412 /*
16413 * Commit the pending bounce buffers (usually just one).
16414 */
16415 unsigned cBufs = 0;
16416 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16417 while (iMemMap-- > 0)
16418 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16419 {
16420 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16421 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16422 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16423
16424 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16425 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16426 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16427
16428 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16429 {
16430 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16431 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16432 pbBuf,
16433 cbFirst,
16434 PGMACCESSORIGIN_IEM);
16435 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16436 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16437 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16438 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16439 }
16440
16441 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16442 {
16443 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16444 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16445 pbBuf + cbFirst,
16446 cbSecond,
16447 PGMACCESSORIGIN_IEM);
16448 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16449 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16450 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16451 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16452 }
16453 cBufs++;
16454 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16455 }
16456
16457 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16458 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16459 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16460 pVCpu->iem.s.cActiveMappings = 0;
16461 return rcStrict;
16462}
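/* A minimal usage sketch (disabled, editor's illustration): how ring-3
   force-flag processing might hand a pending IEM bounce-buffer commit to
   IEMR3ProcessForceFlag. The surrounding caller shape is an assumption. */
#if 0
static VBOXSTRICTRC ExampleProcessIemForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif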
16463
16464#endif /* IN_RING3 */
16465