VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@66974

Last change on this file since 66974 was 66957, checked in by vboxsync, 8 years ago

IEM: Some VEX related regression fixes and cleanups.

1/* $Id: IEMAll.cpp 66957 2017-05-18 16:21:24Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with the speed goal, as the disassembler chews things a bit too
37 * much and leaves us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
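/* Illustrative sketch (not part of the original source): roughly how the levels
   listed above show up in IEM code.  The messages below are made-up examples.
       Log(("iemRaiseXcptOrInt: vec=%#x\n", u8Vector));                      // level 1: exceptions & major events
       LogFlow(("IEMExecOne: rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); // flow: enter/exit info
       Log4(("decode - %04x:%08RX64 add eax, ebx\n", uCs, uRip));            // level 4: mnemonics w/ EIP
       Log8(("IEM WR %RGv LB %u\n", GCPtrMem, cbMem));                       // level 8: memory writes
*/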
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/hm_svm.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#ifdef IEM_VERIFICATION_MODE_FULL
118# include <VBox/vmm/rem.h>
119# include <VBox/vmm/mm.h>
120#endif
121#include <VBox/vmm/vm.h>
122#include <VBox/log.h>
123#include <VBox/err.h>
124#include <VBox/param.h>
125#include <VBox/dis.h>
126#include <VBox/disopcode.h>
127#include <iprt/assert.h>
128#include <iprt/string.h>
129#include <iprt/x86.h>
130
131
132/*********************************************************************************************************************************
133* Structures and Typedefs *
134*********************************************************************************************************************************/
135/** @typedef PFNIEMOP
136 * Pointer to an opcode decoder function.
137 */
138
139/** @def FNIEMOP_DEF
140 * Define an opcode decoder function.
141 *
142 * We're using macros for this so that adding and removing parameters as well as
143 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
144 *
145 * @param a_Name The function name.
146 */
147
148/** @typedef PFNIEMOPRM
149 * Pointer to an opcode decoder function with RM byte.
150 */
151
152/** @def FNIEMOPRM_DEF
153 * Define an opcode decoder function with RM byte.
154 *
155 * We're using macros for this so that adding and removing parameters as well as
156 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
157 *
158 * @param a_Name The function name.
159 */
160
161#if defined(__GNUC__) && defined(RT_ARCH_X86)
162typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
164# define FNIEMOP_DEF(a_Name) \
165 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
170
171#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
172typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
174# define FNIEMOP_DEF(a_Name) \
175 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
176# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
177 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
178# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
179 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
180
181#elif defined(__GNUC__)
182typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
183typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
184# define FNIEMOP_DEF(a_Name) \
185 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
186# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
187 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
188# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
189 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
190
191#else
192typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
193typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
194# define FNIEMOP_DEF(a_Name) \
195 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
196# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
197 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
198# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
199 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
200
201#endif
202#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
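/* Illustrative sketch (not from the original file): how a decoder is defined with
   FNIEMOP_DEF and later dispatched with FNIEMOP_CALL (defined further down).  The
   function name and opcode byte variable are hypothetical.
       FNIEMOP_DEF(iemOp_example)      // expands to IEM_STATIC VBOXSTRICTRC iemOp_example(PVMCPU pVCpu) (+ attributes)
       {
           return VINF_SUCCESS;        // placeholder body
       }
       // ... in the instruction decode loop:
       return FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]);
*/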
203
204
205/**
206 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
207 */
208typedef union IEMSELDESC
209{
210 /** The legacy view. */
211 X86DESC Legacy;
212 /** The long mode view. */
213 X86DESC64 Long;
214} IEMSELDESC;
215/** Pointer to a selector descriptor table entry. */
216typedef IEMSELDESC *PIEMSELDESC;
217
218/**
219 * CPU exception classes.
220 */
221typedef enum IEMXCPTCLASS
222{
223 IEMXCPTCLASS_BENIGN,
224 IEMXCPTCLASS_CONTRIBUTORY,
225 IEMXCPTCLASS_PAGE_FAULT
226} IEMXCPTCLASS;
227
228
229/*********************************************************************************************************************************
230* Defined Constants And Macros *
231*********************************************************************************************************************************/
232/** @def IEM_WITH_SETJMP
233 * Enables alternative status code handling using setjmps.
234 *
235 * This adds a bit of expense via the setjmp() call since it saves all the
236 * non-volatile registers. However, it eliminates return code checks and allows
237 * for more optimal return value passing (return regs instead of stack buffer).
238 */
239#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
240# define IEM_WITH_SETJMP
241#endif
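/* Rough sketch (assumption, not actual IEM code) of what IEM_WITH_SETJMP changes
   for a fetch helper; the helper names below are hypothetical.
       // Status-code style: every call site checks the return value.
       uint8_t u8;
       VBOXSTRICTRC rcStrict = iemExampleFetchU8(pVCpu, &u8);
       if (rcStrict != VINF_SUCCESS)
           return rcStrict;
       // Setjmp style: the value is returned directly and errors longjmp back to
       // the setjmp frame established when instruction execution was entered.
       uint8_t u8 = iemExampleFetchU8Jmp(pVCpu);
*/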
242
243/** Temporary hack to disable the double execution. Will be removed in favor
244 * of a dedicated execution mode in EM. */
245//#define IEM_VERIFICATION_MODE_NO_REM
246
247/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
248 * due to GCC lacking knowledge about the value range of a switch. */
249#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
250
251/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
252#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
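/* Usage sketch (illustrative): terminating an operand-size switch so GCC doesn't
   warn about a path without a return value.
       switch (pVCpu->iem.s.enmEffOpSize)
       {
           case IEMMODE_16BIT: cbValue = 2; break;
           case IEMMODE_32BIT: cbValue = 4; break;
           case IEMMODE_64BIT: cbValue = 8; break;
           IEM_NOT_REACHED_DEFAULT_CASE_RET();
       }
*/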
253
254/**
255 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
256 * occasion.
257 */
258#ifdef LOG_ENABLED
259# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
260 do { \
261 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
262 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
263 } while (0)
264#else
265# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
266 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
267#endif
268
269/**
270 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
271 * occasion using the supplied logger statement.
272 *
273 * @param a_LoggerArgs What to log on failure.
274 */
275#ifdef LOG_ENABLED
276# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
277 do { \
278 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
279 /*LogFunc(a_LoggerArgs);*/ \
280 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
281 } while (0)
282#else
283# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
284 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
285#endif
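/* Usage sketch (illustrative): bailing out of an unimplemented corner case; the
   condition and value below are hypothetical.
       if (fSomeUnimplementedCornerCase)
           IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("corner case not handled for %#x\n", uValue));
*/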
286
287/**
288 * Call an opcode decoder function.
289 *
290 * We're using macros for this so that adding and removing parameters can be
291 * done as we please. See FNIEMOP_DEF.
292 */
293#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
294
295/**
296 * Call a common opcode decoder function taking one extra argument.
297 *
298 * We're using macros for this so that adding and removing parameters can be
299 * done as we please. See FNIEMOP_DEF_1.
300 */
301#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
302
303/**
304 * Call a common opcode decoder function taking two extra arguments.
305 *
306 * We're using macros for this so that adding and removing parameters can be
307 * done as we please. See FNIEMOP_DEF_2.
308 */
309#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
310
311/**
312 * Check if we're currently executing in real or virtual 8086 mode.
313 *
314 * @returns @c true if it is, @c false if not.
315 * @param a_pVCpu The IEM state of the current CPU.
316 */
317#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
318
319/**
320 * Check if we're currently executing in virtual 8086 mode.
321 *
322 * @returns @c true if it is, @c false if not.
323 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
324 */
325#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
326
327/**
328 * Check if we're currently executing in long mode.
329 *
330 * @returns @c true if it is, @c false if not.
331 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
332 */
333#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
334
335/**
336 * Check if we're currently executing in real mode.
337 *
338 * @returns @c true if it is, @c false if not.
339 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
340 */
341#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
342
343/**
344 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
345 * @returns PCCPUMFEATURES
346 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
347 */
348#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
349
350/**
351 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
352 * @returns PCCPUMFEATURES
353 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
354 */
355#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
356
357/**
358 * Evaluates to true if we're presenting an Intel CPU to the guest.
359 */
360#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
361
362/**
363 * Evaluates to true if we're presenting an AMD CPU to the guest.
364 */
365#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
366
367/**
368 * Check if the address is canonical.
369 */
370#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
371
372/**
373 * Gets the effective VEX.VVVV value.
374 *
375 * The 4th bit is ignored when not executing 64-bit code.
376 * @returns effective V-register value.
377 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
378 */
379#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
380 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
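/* Worked example (derived from the macro above): uVex3rdReg == 13 (VEX.vvvv = 0xD)
   yields 13 in 64-bit mode but 13 & 7 = 5 in 16/32-bit code, since only long mode
   can address the upper register bank. */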
381
382/** @def IEM_USE_UNALIGNED_DATA_ACCESS
383 * Use unaligned accesses instead of elaborate byte assembly. */
384#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
385# define IEM_USE_UNALIGNED_DATA_ACCESS
386#endif
387
388#ifdef VBOX_WITH_NESTED_HWVIRT
389/**
390 * Check the common SVM instruction preconditions.
391 */
392# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
393 do { \
394 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
395 { \
396 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
397 return iemRaiseUndefinedOpcode(pVCpu); \
398 } \
399 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
400 { \
401 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
402 return iemRaiseUndefinedOpcode(pVCpu); \
403 } \
404 if (pVCpu->iem.s.uCpl != 0) \
405 { \
406 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
407 return iemRaiseGeneralProtectionFault0(pVCpu); \
408 } \
409 } while (0)
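/* Usage sketch (assumption about the call sites, which live elsewhere in IEM):
   the VMRUN/VMLOAD/VMSAVE/... implementations start with
       IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
   so the #UD/#GP(0) conditions above are enforced before any real work is done. */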
410
411/**
412 * Check if SVM is enabled.
413 */
414# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
415
416/**
417 * Check if an SVM control/instruction intercept is set.
418 */
419# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
420
421/**
422 * Check if an SVM read CRx intercept is set.
423 */
424# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
425
426/**
427 * Check if an SVM write CRx intercept is set.
428 */
429# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
430
431/**
432 * Check if an SVM read DRx intercept is set.
433 */
434# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
435
436/**
437 * Check if an SVM write DRx intercept is set.
438 */
439# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
440
441/**
442 * Check if an SVM exception intercept is set.
443 */
444# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uVector)))
445
446/**
447 * Invokes the SVM \#VMEXIT handler for the nested-guest.
448 */
449# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
450 do \
451 { \
452 VBOXSTRICTRC rcStrictVmExit = HMSvmNstGstVmExit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), \
453 (a_uExitInfo2)); \
454 return rcStrictVmExit == VINF_SVM_VMEXIT ? VINF_SUCCESS : rcStrictVmExit; \
455 } while (0)
456
457/**
458 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
459 * corresponding decode assist information.
460 */
461# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
462 do \
463 { \
464 uint64_t uExitInfo1; \
465 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssist \
466 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
467 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
468 else \
469 uExitInfo1 = 0; \
470 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
471 } while (0)
472
473/**
474 * Checks and handles an SVM MSR intercept.
475 */
476# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) \
477 HMSvmNstGstHandleMsrIntercept((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_idMsr), (a_fWrite))
478
479#else
480# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
481# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
482# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
483# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
484# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
485# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
486# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
487# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
488# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
489# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
490# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) (VERR_SVM_IPE_1)
491
492#endif /* VBOX_WITH_NESTED_HWVIRT */
493
494
495/*********************************************************************************************************************************
496* Global Variables *
497*********************************************************************************************************************************/
498extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
499
500
501/** Function table for the ADD instruction. */
502IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
503{
504 iemAImpl_add_u8, iemAImpl_add_u8_locked,
505 iemAImpl_add_u16, iemAImpl_add_u16_locked,
506 iemAImpl_add_u32, iemAImpl_add_u32_locked,
507 iemAImpl_add_u64, iemAImpl_add_u64_locked
508};
509
510/** Function table for the ADC instruction. */
511IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
512{
513 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
514 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
515 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
516 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
517};
518
519/** Function table for the SUB instruction. */
520IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
521{
522 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
523 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
524 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
525 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
526};
527
528/** Function table for the SBB instruction. */
529IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
530{
531 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
532 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
533 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
534 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
535};
536
537/** Function table for the OR instruction. */
538IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
539{
540 iemAImpl_or_u8, iemAImpl_or_u8_locked,
541 iemAImpl_or_u16, iemAImpl_or_u16_locked,
542 iemAImpl_or_u32, iemAImpl_or_u32_locked,
543 iemAImpl_or_u64, iemAImpl_or_u64_locked
544};
545
546/** Function table for the XOR instruction. */
547IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
548{
549 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
550 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
551 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
552 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
553};
554
555/** Function table for the AND instruction. */
556IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
557{
558 iemAImpl_and_u8, iemAImpl_and_u8_locked,
559 iemAImpl_and_u16, iemAImpl_and_u16_locked,
560 iemAImpl_and_u32, iemAImpl_and_u32_locked,
561 iemAImpl_and_u64, iemAImpl_and_u64_locked
562};
563
564/** Function table for the CMP instruction.
565 * @remarks Making operand order ASSUMPTIONS.
566 */
567IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
568{
569 iemAImpl_cmp_u8, NULL,
570 iemAImpl_cmp_u16, NULL,
571 iemAImpl_cmp_u32, NULL,
572 iemAImpl_cmp_u64, NULL
573};
574
575/** Function table for the TEST instruction.
576 * @remarks Making operand order ASSUMPTIONS.
577 */
578IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
579{
580 iemAImpl_test_u8, NULL,
581 iemAImpl_test_u16, NULL,
582 iemAImpl_test_u32, NULL,
583 iemAImpl_test_u64, NULL
584};
585
586/** Function table for the BT instruction. */
587IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
588{
589 NULL, NULL,
590 iemAImpl_bt_u16, NULL,
591 iemAImpl_bt_u32, NULL,
592 iemAImpl_bt_u64, NULL
593};
594
595/** Function table for the BTC instruction. */
596IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
597{
598 NULL, NULL,
599 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
600 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
601 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
602};
603
604/** Function table for the BTR instruction. */
605IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
606{
607 NULL, NULL,
608 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
609 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
610 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
611};
612
613/** Function table for the BTS instruction. */
614IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
615{
616 NULL, NULL,
617 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
618 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
619 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
620};
621
622/** Function table for the BSF instruction. */
623IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
624{
625 NULL, NULL,
626 iemAImpl_bsf_u16, NULL,
627 iemAImpl_bsf_u32, NULL,
628 iemAImpl_bsf_u64, NULL
629};
630
631/** Function table for the BSR instruction. */
632IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
633{
634 NULL, NULL,
635 iemAImpl_bsr_u16, NULL,
636 iemAImpl_bsr_u32, NULL,
637 iemAImpl_bsr_u64, NULL
638};
639
640/** Function table for the IMUL instruction. */
641IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
642{
643 NULL, NULL,
644 iemAImpl_imul_two_u16, NULL,
645 iemAImpl_imul_two_u32, NULL,
646 iemAImpl_imul_two_u64, NULL
647};
648
649/** Group 1 /r lookup table. */
650IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
651{
652 &g_iemAImpl_add,
653 &g_iemAImpl_or,
654 &g_iemAImpl_adc,
655 &g_iemAImpl_sbb,
656 &g_iemAImpl_and,
657 &g_iemAImpl_sub,
658 &g_iemAImpl_xor,
659 &g_iemAImpl_cmp
660};
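/* Dispatch sketch (illustrative): the group 1 opcodes (0x80..0x83) select their
   worker from this table using the reg field of the ModR/M byte, e.g.
       PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
*/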
661
662/** Function table for the INC instruction. */
663IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
664{
665 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
666 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
667 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
668 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
669};
670
671/** Function table for the DEC instruction. */
672IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
673{
674 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
675 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
676 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
677 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
678};
679
680/** Function table for the NEG instruction. */
681IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
682{
683 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
684 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
685 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
686 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
687};
688
689/** Function table for the NOT instruction. */
690IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
691{
692 iemAImpl_not_u8, iemAImpl_not_u8_locked,
693 iemAImpl_not_u16, iemAImpl_not_u16_locked,
694 iemAImpl_not_u32, iemAImpl_not_u32_locked,
695 iemAImpl_not_u64, iemAImpl_not_u64_locked
696};
697
698
699/** Function table for the ROL instruction. */
700IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
701{
702 iemAImpl_rol_u8,
703 iemAImpl_rol_u16,
704 iemAImpl_rol_u32,
705 iemAImpl_rol_u64
706};
707
708/** Function table for the ROR instruction. */
709IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
710{
711 iemAImpl_ror_u8,
712 iemAImpl_ror_u16,
713 iemAImpl_ror_u32,
714 iemAImpl_ror_u64
715};
716
717/** Function table for the RCL instruction. */
718IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
719{
720 iemAImpl_rcl_u8,
721 iemAImpl_rcl_u16,
722 iemAImpl_rcl_u32,
723 iemAImpl_rcl_u64
724};
725
726/** Function table for the RCR instruction. */
727IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
728{
729 iemAImpl_rcr_u8,
730 iemAImpl_rcr_u16,
731 iemAImpl_rcr_u32,
732 iemAImpl_rcr_u64
733};
734
735/** Function table for the SHL instruction. */
736IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
737{
738 iemAImpl_shl_u8,
739 iemAImpl_shl_u16,
740 iemAImpl_shl_u32,
741 iemAImpl_shl_u64
742};
743
744/** Function table for the SHR instruction. */
745IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
746{
747 iemAImpl_shr_u8,
748 iemAImpl_shr_u16,
749 iemAImpl_shr_u32,
750 iemAImpl_shr_u64
751};
752
753/** Function table for the SAR instruction. */
754IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
755{
756 iemAImpl_sar_u8,
757 iemAImpl_sar_u16,
758 iemAImpl_sar_u32,
759 iemAImpl_sar_u64
760};
761
762
763/** Function table for the MUL instruction. */
764IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
765{
766 iemAImpl_mul_u8,
767 iemAImpl_mul_u16,
768 iemAImpl_mul_u32,
769 iemAImpl_mul_u64
770};
771
772/** Function table for the IMUL instruction working implicitly on rAX. */
773IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
774{
775 iemAImpl_imul_u8,
776 iemAImpl_imul_u16,
777 iemAImpl_imul_u32,
778 iemAImpl_imul_u64
779};
780
781/** Function table for the DIV instruction. */
782IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
783{
784 iemAImpl_div_u8,
785 iemAImpl_div_u16,
786 iemAImpl_div_u32,
787 iemAImpl_div_u64
788};
789
790/** Function table for the IDIV instruction. */
791IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
792{
793 iemAImpl_idiv_u8,
794 iemAImpl_idiv_u16,
795 iemAImpl_idiv_u32,
796 iemAImpl_idiv_u64
797};
798
799/** Function table for the SHLD instruction */
800IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
801{
802 iemAImpl_shld_u16,
803 iemAImpl_shld_u32,
804 iemAImpl_shld_u64,
805};
806
807/** Function table for the SHRD instruction */
808IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
809{
810 iemAImpl_shrd_u16,
811 iemAImpl_shrd_u32,
812 iemAImpl_shrd_u64,
813};
814
815
816/** Function table for the PUNPCKLBW instruction */
817IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
818/** Function table for the PUNPCKLWD instruction */
819IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
820/** Function table for the PUNPCKLDQ instruction */
821IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
822/** Function table for the PUNPCKLQDQ instruction */
823IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
824
825/** Function table for the PUNPCKHBW instruction */
826IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
827/** Function table for the PUNPCKHWD instruction */
828IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
829/** Function table for the PUNPCKHDQ instruction */
830IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
831/** Function table for the PUNPCKHQDQ instruction */
832IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
833
834/** Function table for the PXOR instruction */
835IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
836/** Function table for the PCMPEQB instruction */
837IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
838/** Function table for the PCMPEQW instruction */
839IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
840/** Function table for the PCMPEQD instruction */
841IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
842
843
844#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
845/** What IEM just wrote. */
846uint8_t g_abIemWrote[256];
847/** How much IEM just wrote. */
848size_t g_cbIemWrote;
849#endif
850
851
852/*********************************************************************************************************************************
853* Internal Functions *
854*********************************************************************************************************************************/
855IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
856IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
857IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
858IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
859/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
860IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
861IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
862IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
863IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
864IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
865IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
866IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
867IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
868IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
869IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
870IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
871IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
872#ifdef IEM_WITH_SETJMP
873DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
874DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
875DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
876DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
877DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
878#endif
879
880IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
881IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
882IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
883IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
884IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
885IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
886IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
887IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
888IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
889IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
890IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
891IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
892IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
893IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
894IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
895IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
896
897#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
898IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
899#endif
900IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
901IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
902
903#ifdef VBOX_WITH_NESTED_HWVIRT
904/**
905 * Checks if the intercepted IO instruction causes a \#VMEXIT and handles it
906 * accordingly.
907 *
908 * @returns VBox strict status code.
909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
910 * @param u16Port The IO port being accessed.
911 * @param enmIoType The type of IO access.
912 * @param cbReg The IO operand size in bytes.
913 * @param cAddrSizeBits The address size in bits (16, 32 or 64).
914 * @param iEffSeg The effective segment number.
915 * @param fRep Whether this is a repeating IO instruction (REP prefix).
916 * @param fStrIo Whether this is a string IO instruction.
917 * @param cbInstr The length of the IO instruction in bytes.
918 *
919 * @remarks This must be called only when IO instructions are intercepted by the
920 * nested-guest hypervisor.
921 */
922IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
923 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
924{
925 Assert(IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT));
926 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
927 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
928
929 static const uint32_t s_auIoOpSize[] = { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
930 static const uint32_t s_auIoAddrSize[] = { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
931
932 SVMIOIOEXITINFO IoExitInfo;
933 IoExitInfo.u = s_auIoOpSize[cbReg & 7];
934 IoExitInfo.u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
935 IoExitInfo.n.u1STR = fStrIo;
936 IoExitInfo.n.u1REP = fRep;
937 IoExitInfo.n.u3SEG = iEffSeg & 0x7;
938 IoExitInfo.n.u1Type = enmIoType;
939 IoExitInfo.n.u16Port = u16Port;
940
941 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
942 return HMSvmNstGstHandleIOIntercept(pVCpu, pCtx, &IoExitInfo, pCtx->rip + cbInstr);
943}
944
945#else
946IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
947 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
948{
949 RT_NOREF9(pVCpu, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo, cbInstr);
950 return VERR_IEM_IPE_9;
951}
952#endif /* VBOX_WITH_NESTED_HWVIRT */
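/* Worked example (illustrative, following the packing code above): a REP OUTSB
   with a 16-bit address size is encoded as SVM_IOIO_8_BIT_OP | SVM_IOIO_16_BIT_ADDR
   with u1STR=1, u1REP=1, the effective source segment in u3SEG and the port from
   DX in u16Port; the handler also receives pCtx->rip + cbInstr as the resume RIP. */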
953
954
955/**
956 * Sets the pass up status.
957 *
958 * @returns VINF_SUCCESS.
959 * @param pVCpu The cross context virtual CPU structure of the
960 * calling thread.
961 * @param rcPassUp The pass up status. Must be informational.
962 * VINF_SUCCESS is not allowed.
963 */
964IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
965{
966 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
967
968 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
969 if (rcOldPassUp == VINF_SUCCESS)
970 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
971 /* If both are EM scheduling codes, use EM priority rules. */
972 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
973 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
974 {
975 if (rcPassUp < rcOldPassUp)
976 {
977 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
978 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
979 }
980 else
981 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
982 }
983 /* Override EM scheduling with specific status code. */
984 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
985 {
986 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
987 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
988 }
989 /* Don't override specific status code, first come first served. */
990 else
991 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
992 return VINF_SUCCESS;
993}
994
995
996/**
997 * Calculates the CPU mode.
998 *
999 * This is mainly for updating IEMCPU::enmCpuMode.
1000 *
1001 * @returns CPU mode.
1002 * @param pCtx The register context for the CPU.
1003 */
1004DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
1005{
1006 if (CPUMIsGuestIn64BitCodeEx(pCtx))
1007 return IEMMODE_64BIT;
1008 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1009 return IEMMODE_32BIT;
1010 return IEMMODE_16BIT;
1011}
1012
1013
1014/**
1015 * Initializes the execution state.
1016 *
1017 * @param pVCpu The cross context virtual CPU structure of the
1018 * calling thread.
1019 * @param fBypassHandlers Whether to bypass access handlers.
1020 *
1021 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1022 * side-effects in strict builds.
1023 */
1024DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1025{
1026 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1027
1028 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1029
1030#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1031 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1032 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1033 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1034 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1035 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1036 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1037 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1038 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1039#endif
1040
1041#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1042 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1043#endif
1044 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1045 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1046#ifdef VBOX_STRICT
1047 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1048 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1049 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1050 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1051 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1052 pVCpu->iem.s.uRexReg = 127;
1053 pVCpu->iem.s.uRexB = 127;
1054 pVCpu->iem.s.uRexIndex = 127;
1055 pVCpu->iem.s.iEffSeg = 127;
1056 pVCpu->iem.s.idxPrefix = 127;
1057 pVCpu->iem.s.uVex3rdReg = 127;
1058 pVCpu->iem.s.uVexLength = 127;
1059 pVCpu->iem.s.fEvexStuff = 127;
1060 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1061# ifdef IEM_WITH_CODE_TLB
1062 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1063 pVCpu->iem.s.pbInstrBuf = NULL;
1064 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1065 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1066 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1067 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1068# else
1069 pVCpu->iem.s.offOpcode = 127;
1070 pVCpu->iem.s.cbOpcode = 127;
1071# endif
1072#endif
1073
1074 pVCpu->iem.s.cActiveMappings = 0;
1075 pVCpu->iem.s.iNextMapping = 0;
1076 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1077 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1078#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1079 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1080 && pCtx->cs.u64Base == 0
1081 && pCtx->cs.u32Limit == UINT32_MAX
1082 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1083 if (!pVCpu->iem.s.fInPatchCode)
1084 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1085#endif
1086
1087#ifdef IEM_VERIFICATION_MODE_FULL
1088 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1089 pVCpu->iem.s.fNoRem = true;
1090#endif
1091}
1092
1093
1094/**
1095 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1096 *
1097 * @param pVCpu The cross context virtual CPU structure of the
1098 * calling thread.
1099 */
1100DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1101{
1102 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1103#ifdef IEM_VERIFICATION_MODE_FULL
1104 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1105#endif
1106#ifdef VBOX_STRICT
1107# ifdef IEM_WITH_CODE_TLB
1108 NOREF(pVCpu);
1109# else
1110 pVCpu->iem.s.cbOpcode = 0;
1111# endif
1112#else
1113 NOREF(pVCpu);
1114#endif
1115}
1116
1117
1118/**
1119 * Initializes the decoder state.
1120 *
1121 * iemReInitDecoder is mostly a copy of this function.
1122 *
1123 * @param pVCpu The cross context virtual CPU structure of the
1124 * calling thread.
1125 * @param fBypassHandlers Whether to bypass access handlers.
1126 */
1127DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1128{
1129 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1130
1131 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1132
1133#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1134 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1135 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1136 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1137 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1138 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1139 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1140 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1141 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1142#endif
1143
1144#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1145 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1146#endif
1147 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1148#ifdef IEM_VERIFICATION_MODE_FULL
1149 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1150 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1151#endif
1152 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1153 pVCpu->iem.s.enmCpuMode = enmMode;
1154 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1155 pVCpu->iem.s.enmEffAddrMode = enmMode;
1156 if (enmMode != IEMMODE_64BIT)
1157 {
1158 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1159 pVCpu->iem.s.enmEffOpSize = enmMode;
1160 }
1161 else
1162 {
1163 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1164 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1165 }
1166 pVCpu->iem.s.fPrefixes = 0;
1167 pVCpu->iem.s.uRexReg = 0;
1168 pVCpu->iem.s.uRexB = 0;
1169 pVCpu->iem.s.uRexIndex = 0;
1170 pVCpu->iem.s.idxPrefix = 0;
1171 pVCpu->iem.s.uVex3rdReg = 0;
1172 pVCpu->iem.s.uVexLength = 0;
1173 pVCpu->iem.s.fEvexStuff = 0;
1174 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1175#ifdef IEM_WITH_CODE_TLB
1176 pVCpu->iem.s.pbInstrBuf = NULL;
1177 pVCpu->iem.s.offInstrNextByte = 0;
1178 pVCpu->iem.s.offCurInstrStart = 0;
1179# ifdef VBOX_STRICT
1180 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1181 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1182 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1183# endif
1184#else
1185 pVCpu->iem.s.offOpcode = 0;
1186 pVCpu->iem.s.cbOpcode = 0;
1187#endif
1188 pVCpu->iem.s.cActiveMappings = 0;
1189 pVCpu->iem.s.iNextMapping = 0;
1190 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1191 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1192#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1193 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1194 && pCtx->cs.u64Base == 0
1195 && pCtx->cs.u32Limit == UINT32_MAX
1196 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1197 if (!pVCpu->iem.s.fInPatchCode)
1198 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1199#endif
1200
1201#ifdef DBGFTRACE_ENABLED
1202 switch (enmMode)
1203 {
1204 case IEMMODE_64BIT:
1205 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1206 break;
1207 case IEMMODE_32BIT:
1208 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1209 break;
1210 case IEMMODE_16BIT:
1211 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1212 break;
1213 }
1214#endif
1215}
1216
1217
1218/**
1219 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1220 *
1221 * This is mostly a copy of iemInitDecoder.
1222 *
1223 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1224 */
1225DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1226{
1227 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1228
1229 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1230
1231#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1232 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1233 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1234 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1235 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1236 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1237 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1238 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1239 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1240#endif
1241
1242 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1243#ifdef IEM_VERIFICATION_MODE_FULL
1244 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1245 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1246#endif
1247 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1248 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1249 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1250 pVCpu->iem.s.enmEffAddrMode = enmMode;
1251 if (enmMode != IEMMODE_64BIT)
1252 {
1253 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1254 pVCpu->iem.s.enmEffOpSize = enmMode;
1255 }
1256 else
1257 {
1258 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1259 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1260 }
1261 pVCpu->iem.s.fPrefixes = 0;
1262 pVCpu->iem.s.uRexReg = 0;
1263 pVCpu->iem.s.uRexB = 0;
1264 pVCpu->iem.s.uRexIndex = 0;
1265 pVCpu->iem.s.idxPrefix = 0;
1266 pVCpu->iem.s.uVex3rdReg = 0;
1267 pVCpu->iem.s.uVexLength = 0;
1268 pVCpu->iem.s.fEvexStuff = 0;
1269 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1270#ifdef IEM_WITH_CODE_TLB
1271 if (pVCpu->iem.s.pbInstrBuf)
1272 {
1273 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1274 - pVCpu->iem.s.uInstrBufPc;
1275 if (off < pVCpu->iem.s.cbInstrBufTotal)
1276 {
1277 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1278 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1279 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1280 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1281 else
1282 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1283 }
1284 else
1285 {
1286 pVCpu->iem.s.pbInstrBuf = NULL;
1287 pVCpu->iem.s.offInstrNextByte = 0;
1288 pVCpu->iem.s.offCurInstrStart = 0;
1289 pVCpu->iem.s.cbInstrBuf = 0;
1290 pVCpu->iem.s.cbInstrBufTotal = 0;
1291 }
1292 }
1293 else
1294 {
1295 pVCpu->iem.s.offInstrNextByte = 0;
1296 pVCpu->iem.s.offCurInstrStart = 0;
1297 pVCpu->iem.s.cbInstrBuf = 0;
1298 pVCpu->iem.s.cbInstrBufTotal = 0;
1299 }
1300#else
1301 pVCpu->iem.s.cbOpcode = 0;
1302 pVCpu->iem.s.offOpcode = 0;
1303#endif
1304 Assert(pVCpu->iem.s.cActiveMappings == 0);
1305 pVCpu->iem.s.iNextMapping = 0;
1306 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1307 Assert(pVCpu->iem.s.fBypassHandlers == false);
1308#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1309 if (!pVCpu->iem.s.fInPatchCode)
1310 { /* likely */ }
1311 else
1312 {
1313 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1314 && pCtx->cs.u64Base == 0
1315 && pCtx->cs.u32Limit == UINT32_MAX
1316 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1317 if (!pVCpu->iem.s.fInPatchCode)
1318 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1319 }
1320#endif
1321
1322#ifdef DBGFTRACE_ENABLED
1323 switch (enmMode)
1324 {
1325 case IEMMODE_64BIT:
1326 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1327 break;
1328 case IEMMODE_32BIT:
1329 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1330 break;
1331 case IEMMODE_16BIT:
1332 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1333 break;
1334 }
1335#endif
1336}
1337
1338
1339
1340/**
1341 * Prefetches opcodes the first time, when execution is being started.
1342 *
1343 * @returns Strict VBox status code.
1344 * @param pVCpu The cross context virtual CPU structure of the
1345 * calling thread.
1346 * @param fBypassHandlers Whether to bypass access handlers.
1347 */
1348IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1349{
1350#ifdef IEM_VERIFICATION_MODE_FULL
1351 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1352#endif
1353 iemInitDecoder(pVCpu, fBypassHandlers);
1354
1355#ifdef IEM_WITH_CODE_TLB
1356 /** @todo Do ITLB lookup here. */
1357
1358#else /* !IEM_WITH_CODE_TLB */
1359
1360 /*
1361 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1362 *
1363 * First translate CS:rIP to a physical address.
1364 */
1365 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1366 uint32_t cbToTryRead;
1367 RTGCPTR GCPtrPC;
1368 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1369 {
1370 cbToTryRead = PAGE_SIZE;
1371 GCPtrPC = pCtx->rip;
1372 if (IEM_IS_CANONICAL(GCPtrPC))
1373 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1374 else
1375 return iemRaiseGeneralProtectionFault0(pVCpu);
1376 }
1377 else
1378 {
1379 uint32_t GCPtrPC32 = pCtx->eip;
1380 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1381 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1382 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1383 else
1384 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1385 if (cbToTryRead) { /* likely */ }
1386 else /* overflowed */
1387 {
1388 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1389 cbToTryRead = UINT32_MAX;
1390 }
1391 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1392 Assert(GCPtrPC <= UINT32_MAX);
1393 }
1394
1395# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1396 /* Allow interpretation of patch manager code blocks since they can for
1397 instance throw #PFs for perfectly good reasons. */
1398 if (pVCpu->iem.s.fInPatchCode)
1399 {
1400 size_t cbRead = 0;
1401 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1402 AssertRCReturn(rc, rc);
1403 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1404 return VINF_SUCCESS;
1405 }
1406# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1407
1408 RTGCPHYS GCPhys;
1409 uint64_t fFlags;
1410 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1411 if (RT_SUCCESS(rc)) { /* probable */ }
1412 else
1413 {
1414 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1415 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1416 }
1417 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1418 else
1419 {
1420 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1421 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1422 }
1423 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1424 else
1425 {
1426 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1427 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1428 }
1429 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1430 /** @todo Check reserved bits and such stuff. PGM is better at doing
1431 * that, so do it when implementing the guest virtual address
1432 * TLB... */
1433
1434# ifdef IEM_VERIFICATION_MODE_FULL
1435 /*
1436 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1437 * instruction.
1438 */
1439 /** @todo optimize this differently by not using PGMPhysRead. */
1440 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1441 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1442 if ( offPrevOpcodes < cbOldOpcodes
1443 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1444 {
1445 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1446 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1447 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1448 pVCpu->iem.s.cbOpcode = cbNew;
1449 return VINF_SUCCESS;
1450 }
1451# endif
1452
1453 /*
1454 * Read the bytes at this address.
1455 */
1456 PVM pVM = pVCpu->CTX_SUFF(pVM);
1457# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1458 size_t cbActual;
1459 if ( PATMIsEnabled(pVM)
1460 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1461 {
1462 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1463 Assert(cbActual > 0);
1464 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1465 }
1466 else
1467# endif
1468 {
1469 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1470 if (cbToTryRead > cbLeftOnPage)
1471 cbToTryRead = cbLeftOnPage;
1472 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1473 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1474
1475 if (!pVCpu->iem.s.fBypassHandlers)
1476 {
1477 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1478 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1479 { /* likely */ }
1480 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1481 {
1482 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1483 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1484 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1485 }
1486 else
1487 {
1488 Log((RT_SUCCESS(rcStrict)
1489 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1490 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1491 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1492 return rcStrict;
1493 }
1494 }
1495 else
1496 {
1497 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1498 if (RT_SUCCESS(rc))
1499 { /* likely */ }
1500 else
1501 {
1502 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1503 GCPtrPC, GCPhys, cbToTryRead, rc));
1504 return rc;
1505 }
1506 }
1507 pVCpu->iem.s.cbOpcode = cbToTryRead;
1508 }
1509#endif /* !IEM_WITH_CODE_TLB */
1510 return VINF_SUCCESS;
1511}
1512
1513
1514/**
1515 * Invalidates the IEM TLBs.
1516 *
1517 * This is called internally as well as by PGM when moving GC mappings.
1518 *
1520 * @param pVCpu The cross context virtual CPU structure of the calling
1521 * thread.
1522 * @param fVmm Set when PGM calls us with a remapping.
1523 */
1524VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1525{
1526#ifdef IEM_WITH_CODE_TLB
1527 pVCpu->iem.s.cbInstrBufTotal = 0;
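/* The entry tags include the TLB revision, so bumping it invalidates every
   entry in one go; only on the rare wrap-around to zero do the tags have to
   be scrubbed explicitly. */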
1528 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1529 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1530 { /* very likely */ }
1531 else
1532 {
1533 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1534 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1535 while (i-- > 0)
1536 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1537 }
1538#endif
1539
1540#ifdef IEM_WITH_DATA_TLB
1541 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1542 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1543 { /* very likely */ }
1544 else
1545 {
1546 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1547 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1548 while (i-- > 0)
1549 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1550 }
1551#endif
1552 NOREF(pVCpu); NOREF(fVmm);
1553}
1554
1555
1556/**
1557 * Invalidates a page in the TLBs.
1558 *
1559 * @param pVCpu The cross context virtual CPU structure of the calling
1560 * thread.
1561 * @param GCPtr The address of the page to invalidate
1562 */
1563VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1564{
1565#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
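/* The TLBs are direct mapped: the low 8 bits of the page number pick the entry,
   and a hit requires the tag (page number | current revision) to match exactly. */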
1566 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1567 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1568 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1569 uintptr_t idx = (uint8_t)GCPtr;
1570
1571# ifdef IEM_WITH_CODE_TLB
1572 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1573 {
1574 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1575 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1576 pVCpu->iem.s.cbInstrBufTotal = 0;
1577 }
1578# endif
1579
1580# ifdef IEM_WITH_DATA_TLB
1581 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1582 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1583# endif
1584#else
1585 NOREF(pVCpu); NOREF(GCPtr);
1586#endif
1587}
1588
1589
1590/**
1591 * Invalidates the host physical aspects of the IEM TLBs.
1592 *
1593 * This is called internally as well as by PGM when moving GC mappings.
1594 *
1595 * @param pVCpu The cross context virtual CPU structure of the calling
1596 * thread.
1597 */
1598VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1599{
1600#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1601 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1602
1603# ifdef IEM_WITH_CODE_TLB
1604 pVCpu->iem.s.cbInstrBufTotal = 0;
1605# endif
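/* Same trick as for the virtual revision: advancing uTlbPhysRev invalidates the
   cached physical page info in every entry (an entry only counts as current when
   its recorded revision matches), and only a wrap-around to zero forces a
   per-entry scrub of the mapping pointers and physical flags below. */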
1606 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1607 if (uTlbPhysRev != 0)
1608 {
1609 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1610 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1611 }
1612 else
1613 {
1614 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1615 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1616
1617 unsigned i;
1618# ifdef IEM_WITH_CODE_TLB
1619 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1620 while (i-- > 0)
1621 {
1622 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1623 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1624 }
1625# endif
1626# ifdef IEM_WITH_DATA_TLB
1627 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1628 while (i-- > 0)
1629 {
1630 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1631 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1632 }
1633# endif
1634 }
1635#else
1636 NOREF(pVCpu);
1637#endif
1638}
1639
1640
1641/**
1642 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1643 *
1644 * This is called internally as well as by PGM when moving GC mappings.
1645 *
1646 * @param pVM The cross context VM structure.
1647 *
1648 * @remarks Caller holds the PGM lock.
1649 */
1650VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1651{
1652 RT_NOREF_PV(pVM);
1653}
1654
1655#ifdef IEM_WITH_CODE_TLB
1656
1657/**
1658 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1659 * failure and longjmp'ing out.
1660 *
1661 * We end up here for a number of reasons:
1662 * - pbInstrBuf isn't yet initialized.
1663 * - Advancing beyond the buffer boundary (e.g. cross page).
1664 * - Advancing beyond the CS segment limit.
1665 * - Fetching from non-mappable page (e.g. MMIO).
1666 *
1667 * @param pVCpu The cross context virtual CPU structure of the
1668 * calling thread.
1669 * @param pvDst Where to return the bytes.
1670 * @param cbDst Number of bytes to read.
1671 *
1672 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1673 */
1674IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1675{
1676#ifdef IN_RING3
1677//__debugbreak();
1678 for (;;)
1679 {
1680 Assert(cbDst <= 8);
1681 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1682
1683 /*
1684 * We might have a partial buffer match, deal with that first to make the
1685 * rest simpler. This is the first part of the cross page/buffer case.
1686 */
1687 if (pVCpu->iem.s.pbInstrBuf != NULL)
1688 {
1689 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1690 {
1691 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1692 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1693 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1694
1695 cbDst -= cbCopy;
1696 pvDst = (uint8_t *)pvDst + cbCopy;
1697 offBuf += cbCopy;
1698 pVCpu->iem.s.offInstrNextByte = offBuf; /* offBuf was already advanced by cbCopy above. */
1699 }
1700 }
1701
1702 /*
1703 * Check segment limit, figuring how much we're allowed to access at this point.
1704 *
1705 * We will fault immediately if RIP is past the segment limit / in non-canonical
1706 * territory. If we do continue, there are one or more bytes to read before we
1707 * end up in trouble and we need to do that first before faulting.
1708 */
1709 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1710 RTGCPTR GCPtrFirst;
1711 uint32_t cbMaxRead;
1712 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1713 {
1714 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1715 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1716 { /* likely */ }
1717 else
1718 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1719 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1720 }
1721 else
1722 {
1723 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1724 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1725 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1726 { /* likely */ }
1727 else
1728 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1729 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1730 if (cbMaxRead != 0)
1731 { /* likely */ }
1732 else
1733 {
1734 /* Overflowed because address is 0 and limit is max. */
1735 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1736 cbMaxRead = X86_PAGE_SIZE;
1737 }
1738 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1739 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1740 if (cbMaxRead2 < cbMaxRead)
1741 cbMaxRead = cbMaxRead2;
1742 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1743 }
1744
1745 /*
1746 * Get the TLB entry for this piece of code.
1747 */
1748 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1749 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1750 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1751 if (pTlbe->uTag == uTag)
1752 {
1753 /* likely when executing lots of code, otherwise unlikely */
1754# ifdef VBOX_WITH_STATISTICS
1755 pVCpu->iem.s.CodeTlb.cTlbHits++;
1756# endif
1757 }
1758 else
1759 {
1760 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1761# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1762 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1763 {
1764 pTlbe->uTag = uTag;
1765 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1766 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1767 pTlbe->GCPhys = NIL_RTGCPHYS;
1768 pTlbe->pbMappingR3 = NULL;
1769 }
1770 else
1771# endif
1772 {
1773 RTGCPHYS GCPhys;
1774 uint64_t fFlags;
1775 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1776 if (RT_FAILURE(rc))
1777 {
1778 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1779 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1780 }
1781
1782 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
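/* Invert the US/RW/D page table bits so a set bit in the TLB entry means
   "no user access" / "no write" / "not dirty", and shift the NX bit all the
   way down to bit 0 where IEMTLBE_F_PT_NO_EXEC lives (hence the AssertCompile
   above). */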
1783 pTlbe->uTag = uTag;
1784 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1785 pTlbe->GCPhys = GCPhys;
1786 pTlbe->pbMappingR3 = NULL;
1787 }
1788 }
1789
1790 /*
1791 * Check TLB page table level access flags.
1792 */
1793 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1794 {
1795 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1796 {
1797 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1798 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1799 }
1800 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1801 {
1802 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1803 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1804 }
1805 }
1806
1807# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1808 /*
1809 * Allow interpretation of patch manager code blocks since they can for
1810 * instance throw #PFs for perfectly good reasons.
1811 */
1812 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1813 { /* likely */ }
1814 else
1815 {
1816 /** @todo This could be optimized a little in ring-3 if we liked. */
1817 size_t cbRead = 0;
1818 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1819 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1820 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1821 return;
1822 }
1823# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1824
1825 /*
1826 * Look up the physical page info if necessary.
1827 */
1828 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1829 { /* not necessary */ }
1830 else
1831 {
1832 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1833 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1834 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1835 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1836 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1837 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1838 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1839 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1840 }
1841
1842# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1843 /*
1844 * Try to do a direct read using the pbMappingR3 pointer.
1845 */
1846 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1847 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1848 {
1849 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1850 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
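/* Cap the decode window at 15 bytes past the instruction start, the maximum
   length of an x86 instruction. */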
1851 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1852 {
1853 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1854 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1855 }
1856 else
1857 {
1858 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1859 Assert(cbInstr < cbMaxRead);
1860 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1861 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1862 }
1863 if (cbDst <= cbMaxRead)
1864 {
1865 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1866 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1867 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1868 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1869 return;
1870 }
1871 pVCpu->iem.s.pbInstrBuf = NULL;
1872
1873 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1874 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1875 }
1876 else
1877# endif
1878#if 0
1879 /*
1880 * If there is no special read handling, we can read a bit more and
1881 * put it in the prefetch buffer.
1882 */
1883 if ( cbDst < cbMaxRead
1884 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1885 {
1886 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1887 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1888 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1889 { /* likely */ }
1890 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1891 {
1892 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1893 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1894 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1895 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1896 }
1897 else
1898 {
1899 Log((RT_SUCCESS(rcStrict)
1900 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1901 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1902 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1903 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1904 }
1905 }
1906 /*
1907 * Special read handling, so only read exactly what's needed.
1908 * This is a highly unlikely scenario.
1909 */
1910 else
1911#endif
1912 {
1913 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1914 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1915 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1916 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1917 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1918 { /* likely */ }
1919 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1920 {
1921 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1922 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1923 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1924 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1925 }
1926 else
1927 {
1928 Log((RT_SUCCESS(rcStrict)
1929 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1930 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1931 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1932 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1933 }
1934 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1935 if (cbToRead == cbDst)
1936 return;
1937 }
1938
1939 /*
1940 * More to read, loop.
1941 */
1942 cbDst -= cbMaxRead;
1943 pvDst = (uint8_t *)pvDst + cbMaxRead;
1944 }
1945#else
1946 RT_NOREF(pvDst, cbDst);
1947 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1948#endif
1949}
1950
1951#else
1952
1953/**
1954 * Tries to fetch at least @a cbMin additional opcode bytes, raising the
1955 * appropriate exception if it fails.
1956 *
1957 * @returns Strict VBox status code.
1958 * @param pVCpu The cross context virtual CPU structure of the
1959 * calling thread.
1960 * @param cbMin The minimum number of bytes relative to offOpcode
1961 * that must be read.
1962 */
1963IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1964{
1965 /*
1966 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1967 *
1968 * First translate CS:rIP to a physical address.
1969 */
1970 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1971 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1972 uint32_t cbToTryRead;
1973 RTGCPTR GCPtrNext;
1974 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1975 {
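/* 64-bit mode has no CS limit, only the canonical check below; start out with
   a whole page and let the clipping further down trim it. */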
1976 cbToTryRead = PAGE_SIZE;
1977 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1978 if (!IEM_IS_CANONICAL(GCPtrNext))
1979 return iemRaiseGeneralProtectionFault0(pVCpu);
1980 }
1981 else
1982 {
1983 uint32_t GCPtrNext32 = pCtx->eip;
1984 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1985 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1986 if (GCPtrNext32 > pCtx->cs.u32Limit)
1987 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1988 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1989 if (!cbToTryRead) /* overflowed */
1990 {
1991 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1992 cbToTryRead = UINT32_MAX;
1993 /** @todo check out wrapping around the code segment. */
1994 }
1995 if (cbToTryRead < cbMin - cbLeft)
1996 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1997 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1998 }
1999
2000 /* Only read up to the end of the page, and make sure we don't read more
2001 than the opcode buffer can hold. */
2002 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2003 if (cbToTryRead > cbLeftOnPage)
2004 cbToTryRead = cbLeftOnPage;
2005 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2006 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2007/** @todo r=bird: Convert assertion into undefined opcode exception? */
2008 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2009
2010# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2011 /* Allow interpretation of patch manager code blocks since they can for
2012 instance throw #PFs for perfectly good reasons. */
2013 if (pVCpu->iem.s.fInPatchCode)
2014 {
2015 size_t cbRead = 0;
2016 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2017 AssertRCReturn(rc, rc);
2018 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2019 return VINF_SUCCESS;
2020 }
2021# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2022
2023 RTGCPHYS GCPhys;
2024 uint64_t fFlags;
2025 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2026 if (RT_FAILURE(rc))
2027 {
2028 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2029 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2030 }
2031 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2032 {
2033 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2034 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2035 }
2036 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2037 {
2038 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2039 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2040 }
2041 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2042 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2043 /** @todo Check reserved bits and such stuff. PGM is better at doing
2044 * that, so do it when implementing the guest virtual address
2045 * TLB... */
2046
2047 /*
2048 * Read the bytes at this address.
2049 *
2050 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2051 * and since PATM should only patch the start of an instruction there
2052 * should be no need to check again here.
2053 */
2054 if (!pVCpu->iem.s.fBypassHandlers)
2055 {
2056 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2057 cbToTryRead, PGMACCESSORIGIN_IEM);
2058 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2059 { /* likely */ }
2060 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2061 {
2062 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2063 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2064 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2065 }
2066 else
2067 {
2068 Log((RT_SUCCESS(rcStrict)
2069 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2070 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2071 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2072 return rcStrict;
2073 }
2074 }
2075 else
2076 {
2077 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2078 if (RT_SUCCESS(rc))
2079 { /* likely */ }
2080 else
2081 {
2082 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2083 return rc;
2084 }
2085 }
2086 pVCpu->iem.s.cbOpcode += cbToTryRead;
2087 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2088
2089 return VINF_SUCCESS;
2090}
2091
2092#endif /* !IEM_WITH_CODE_TLB */
2093#ifndef IEM_WITH_SETJMP
2094
2095/**
2096 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2097 *
2098 * @returns Strict VBox status code.
2099 * @param pVCpu The cross context virtual CPU structure of the
2100 * calling thread.
2101 * @param pb Where to return the opcode byte.
2102 */
2103DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2104{
2105 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2106 if (rcStrict == VINF_SUCCESS)
2107 {
2108 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2109 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2110 pVCpu->iem.s.offOpcode = offOpcode + 1;
2111 }
2112 else
2113 *pb = 0;
2114 return rcStrict;
2115}
2116
2117
2118/**
2119 * Fetches the next opcode byte.
2120 *
2121 * @returns Strict VBox status code.
2122 * @param pVCpu The cross context virtual CPU structure of the
2123 * calling thread.
2124 * @param pu8 Where to return the opcode byte.
2125 */
2126DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2127{
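/* Fast path: the byte is already in the prefetched abOpcode buffer; otherwise
   defer to the out-of-line slow helper which fetches more opcode bytes. */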
2128 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2129 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2130 {
2131 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2132 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2133 return VINF_SUCCESS;
2134 }
2135 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2136}
2137
2138#else /* IEM_WITH_SETJMP */
2139
2140/**
2141 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2142 *
2143 * @returns The opcode byte.
2144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2145 */
2146DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2147{
2148# ifdef IEM_WITH_CODE_TLB
2149 uint8_t u8;
2150 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2151 return u8;
2152# else
2153 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2154 if (rcStrict == VINF_SUCCESS)
2155 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2156 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2157# endif
2158}
2159
2160
2161/**
2162 * Fetches the next opcode byte, longjmp on error.
2163 *
2164 * @returns The opcode byte.
2165 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2166 */
2167DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2168{
2169# ifdef IEM_WITH_CODE_TLB
2170 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2171 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2172 if (RT_LIKELY( pbBuf != NULL
2173 && offBuf < pVCpu->iem.s.cbInstrBuf))
2174 {
2175 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2176 return pbBuf[offBuf];
2177 }
2178# else
2179 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2180 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2181 {
2182 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2183 return pVCpu->iem.s.abOpcode[offOpcode];
2184 }
2185# endif
2186 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2187}
2188
2189#endif /* IEM_WITH_SETJMP */
2190
2191/**
2192 * Fetches the next opcode byte, returns automatically on failure.
2193 *
2194 * @param a_pu8 Where to return the opcode byte.
2195 * @remark Implicitly references pVCpu.
2196 */
2197#ifndef IEM_WITH_SETJMP
2198# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2199 do \
2200 { \
2201 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2202 if (rcStrict2 == VINF_SUCCESS) \
2203 { /* likely */ } \
2204 else \
2205 return rcStrict2; \
2206 } while (0)
2207#else
2208# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2209#endif /* IEM_WITH_SETJMP */
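/* Illustrative usage sketch (not from this file): a hypothetical decoder body
   showing how the IEM_OPCODE_GET_NEXT_* macros hide the two error models.  In
   the setjmp build the macro longjmps on failure, otherwise it returns the
   strict status code from the enclosing function, so callers simply fetch
   opcode bytes in sequence:

       // hypothetical example, assuming pVCpu is in scope and the enclosing
       // function returns VBOXSTRICTRC like the decoder functions do
       uint8_t bOpcode;
       IEM_OPCODE_GET_NEXT_U8(&bOpcode);   // may 'return rcStrict2' or longjmp
       uint8_t bRm;
       IEM_OPCODE_GET_NEXT_U8(&bRm);       // ditto for the ModR/M byte
 */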
2210
2211
2212#ifndef IEM_WITH_SETJMP
2213/**
2214 * Fetches the next signed byte from the opcode stream.
2215 *
2216 * @returns Strict VBox status code.
2217 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2218 * @param pi8 Where to return the signed byte.
2219 */
2220DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2221{
2222 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2223}
2224#endif /* !IEM_WITH_SETJMP */
2225
2226
2227/**
2228 * Fetches the next signed byte from the opcode stream, returning automatically
2229 * on failure.
2230 *
2231 * @param a_pi8 Where to return the signed byte.
2232 * @remark Implicitly references pVCpu.
2233 */
2234#ifndef IEM_WITH_SETJMP
2235# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2236 do \
2237 { \
2238 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2239 if (rcStrict2 != VINF_SUCCESS) \
2240 return rcStrict2; \
2241 } while (0)
2242#else /* IEM_WITH_SETJMP */
2243# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2244
2245#endif /* IEM_WITH_SETJMP */
2246
2247#ifndef IEM_WITH_SETJMP
2248
2249/**
2250 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2251 *
2252 * @returns Strict VBox status code.
2253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2254 * @param pu16 Where to return the opcode word.
2255 */
2256DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2257{
2258 uint8_t u8;
2259 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2260 if (rcStrict == VINF_SUCCESS)
2261 *pu16 = (int8_t)u8;
2262 return rcStrict;
2263}
2264
2265
2266/**
2267 * Fetches the next signed byte from the opcode stream, extending it to
2268 * unsigned 16-bit.
2269 *
2270 * @returns Strict VBox status code.
2271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2272 * @param pu16 Where to return the unsigned word.
2273 */
2274DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2275{
2276 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2277 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2278 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2279
2280 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2281 pVCpu->iem.s.offOpcode = offOpcode + 1;
2282 return VINF_SUCCESS;
2283}
2284
2285#endif /* !IEM_WITH_SETJMP */
2286
2287/**
2288 * Fetches the next signed byte from the opcode stream, sign-extending it to
2289 * a word and returning automatically on failure.
2290 *
2291 * @param a_pu16 Where to return the word.
2292 * @remark Implicitly references pVCpu.
2293 */
2294#ifndef IEM_WITH_SETJMP
2295# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2296 do \
2297 { \
2298 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2299 if (rcStrict2 != VINF_SUCCESS) \
2300 return rcStrict2; \
2301 } while (0)
2302#else
2303# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2304#endif
2305
2306#ifndef IEM_WITH_SETJMP
2307
2308/**
2309 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2310 *
2311 * @returns Strict VBox status code.
2312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2313 * @param pu32 Where to return the opcode dword.
2314 */
2315DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2316{
2317 uint8_t u8;
2318 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2319 if (rcStrict == VINF_SUCCESS)
2320 *pu32 = (int8_t)u8;
2321 return rcStrict;
2322}
2323
2324
2325/**
2326 * Fetches the next signed byte from the opcode stream, extending it to
2327 * unsigned 32-bit.
2328 *
2329 * @returns Strict VBox status code.
2330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2331 * @param pu32 Where to return the unsigned dword.
2332 */
2333DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2334{
2335 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2336 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2337 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2338
2339 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2340 pVCpu->iem.s.offOpcode = offOpcode + 1;
2341 return VINF_SUCCESS;
2342}
2343
2344#endif /* !IEM_WITH_SETJMP */
2345
2346/**
2347 * Fetches the next signed byte from the opcode stream, sign-extending it to
2348 * a double word and returning automatically on failure.
2349 *
2350 * @param a_pu32 Where to return the double word.
2351 * @remark Implicitly references pVCpu.
2352 */
2353#ifndef IEM_WITH_SETJMP
2354# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2355 do \
2356 { \
2357 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2358 if (rcStrict2 != VINF_SUCCESS) \
2359 return rcStrict2; \
2360 } while (0)
2361#else
2362# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2363#endif
2364
2365#ifndef IEM_WITH_SETJMP
2366
2367/**
2368 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2369 *
2370 * @returns Strict VBox status code.
2371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2372 * @param pu64 Where to return the opcode qword.
2373 */
2374DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2375{
2376 uint8_t u8;
2377 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2378 if (rcStrict == VINF_SUCCESS)
2379 *pu64 = (int8_t)u8;
2380 return rcStrict;
2381}
2382
2383
2384/**
2385 * Fetches the next signed byte from the opcode stream, extending it to
2386 * unsigned 64-bit.
2387 *
2388 * @returns Strict VBox status code.
2389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2390 * @param pu64 Where to return the unsigned qword.
2391 */
2392DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2393{
2394 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2395 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2396 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2397
2398 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2399 pVCpu->iem.s.offOpcode = offOpcode + 1;
2400 return VINF_SUCCESS;
2401}
2402
2403#endif /* !IEM_WITH_SETJMP */
2404
2405
2406/**
2407 * Fetches the next signed byte from the opcode stream, sign-extending it to
2408 * a quad word and returning automatically on failure.
2409 *
2410 * @param a_pu64 Where to return the quad word.
2411 * @remark Implicitly references pVCpu.
2412 */
2413#ifndef IEM_WITH_SETJMP
2414# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2415 do \
2416 { \
2417 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2418 if (rcStrict2 != VINF_SUCCESS) \
2419 return rcStrict2; \
2420 } while (0)
2421#else
2422# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2423#endif
2424
2425
2426#ifndef IEM_WITH_SETJMP
2427
2428/**
2429 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2430 *
2431 * @returns Strict VBox status code.
2432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2433 * @param pu16 Where to return the opcode word.
2434 */
2435DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2436{
2437 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2438 if (rcStrict == VINF_SUCCESS)
2439 {
2440 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2441# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2442 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2443# else
2444 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2445# endif
2446 pVCpu->iem.s.offOpcode = offOpcode + 2;
2447 }
2448 else
2449 *pu16 = 0;
2450 return rcStrict;
2451}
2452
2453
2454/**
2455 * Fetches the next opcode word.
2456 *
2457 * @returns Strict VBox status code.
2458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2459 * @param pu16 Where to return the opcode word.
2460 */
2461DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2462{
2463 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2464 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2465 {
2466 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2467# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2468 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2469# else
2470 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2471# endif
2472 return VINF_SUCCESS;
2473 }
2474 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2475}
2476
2477#else /* IEM_WITH_SETJMP */
2478
2479/**
2480 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2481 *
2482 * @returns The opcode word.
2483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2484 */
2485DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2486{
2487# ifdef IEM_WITH_CODE_TLB
2488 uint16_t u16;
2489 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2490 return u16;
2491# else
2492 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2493 if (rcStrict == VINF_SUCCESS)
2494 {
2495 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2496 pVCpu->iem.s.offOpcode += 2;
2497# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2498 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2499# else
2500 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2501# endif
2502 }
2503 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2504# endif
2505}
2506
2507
2508/**
2509 * Fetches the next opcode word, longjmp on error.
2510 *
2511 * @returns The opcode word.
2512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2513 */
2514DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2515{
2516# ifdef IEM_WITH_CODE_TLB
2517 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2518 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2519 if (RT_LIKELY( pbBuf != NULL
2520 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2521 {
2522 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2523# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2524 return *(uint16_t const *)&pbBuf[offBuf];
2525# else
2526 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2527# endif
2528 }
2529# else
2530 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2531 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2532 {
2533 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2534# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2535 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2536# else
2537 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2538# endif
2539 }
2540# endif
2541 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2542}
2543
2544#endif /* IEM_WITH_SETJMP */
2545
2546
2547/**
2548 * Fetches the next opcode word, returns automatically on failure.
2549 *
2550 * @param a_pu16 Where to return the opcode word.
2551 * @remark Implicitly references pVCpu.
2552 */
2553#ifndef IEM_WITH_SETJMP
2554# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2555 do \
2556 { \
2557 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2558 if (rcStrict2 != VINF_SUCCESS) \
2559 return rcStrict2; \
2560 } while (0)
2561#else
2562# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2563#endif
2564
2565#ifndef IEM_WITH_SETJMP
2566
2567/**
2568 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2569 *
2570 * @returns Strict VBox status code.
2571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2572 * @param pu32 Where to return the opcode double word.
2573 */
2574DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2575{
2576 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2577 if (rcStrict == VINF_SUCCESS)
2578 {
2579 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2580 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2581 pVCpu->iem.s.offOpcode = offOpcode + 2;
2582 }
2583 else
2584 *pu32 = 0;
2585 return rcStrict;
2586}
2587
2588
2589/**
2590 * Fetches the next opcode word, zero extending it to a double word.
2591 *
2592 * @returns Strict VBox status code.
2593 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2594 * @param pu32 Where to return the opcode double word.
2595 */
2596DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2597{
2598 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2599 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2600 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2601
2602 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2603 pVCpu->iem.s.offOpcode = offOpcode + 2;
2604 return VINF_SUCCESS;
2605}
2606
2607#endif /* !IEM_WITH_SETJMP */
2608
2609
2610/**
2611 * Fetches the next opcode word and zero extends it to a double word, returns
2612 * automatically on failure.
2613 *
2614 * @param a_pu32 Where to return the opcode double word.
2615 * @remark Implicitly references pVCpu.
2616 */
2617#ifndef IEM_WITH_SETJMP
2618# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2619 do \
2620 { \
2621 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2622 if (rcStrict2 != VINF_SUCCESS) \
2623 return rcStrict2; \
2624 } while (0)
2625#else
2626# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2627#endif
2628
2629#ifndef IEM_WITH_SETJMP
2630
2631/**
2632 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2633 *
2634 * @returns Strict VBox status code.
2635 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2636 * @param pu64 Where to return the opcode quad word.
2637 */
2638DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2639{
2640 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2641 if (rcStrict == VINF_SUCCESS)
2642 {
2643 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2644 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2645 pVCpu->iem.s.offOpcode = offOpcode + 2;
2646 }
2647 else
2648 *pu64 = 0;
2649 return rcStrict;
2650}
2651
2652
2653/**
2654 * Fetches the next opcode word, zero extending it to a quad word.
2655 *
2656 * @returns Strict VBox status code.
2657 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2658 * @param pu64 Where to return the opcode quad word.
2659 */
2660DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2661{
2662 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2663 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2664 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2665
2666 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2667 pVCpu->iem.s.offOpcode = offOpcode + 2;
2668 return VINF_SUCCESS;
2669}
2670
2671#endif /* !IEM_WITH_SETJMP */
2672
2673/**
2674 * Fetches the next opcode word and zero extends it to a quad word, returns
2675 * automatically on failure.
2676 *
2677 * @param a_pu64 Where to return the opcode quad word.
2678 * @remark Implicitly references pVCpu.
2679 */
2680#ifndef IEM_WITH_SETJMP
2681# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2682 do \
2683 { \
2684 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2685 if (rcStrict2 != VINF_SUCCESS) \
2686 return rcStrict2; \
2687 } while (0)
2688#else
2689# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2690#endif
2691
2692
2693#ifndef IEM_WITH_SETJMP
2694/**
2695 * Fetches the next signed word from the opcode stream.
2696 *
2697 * @returns Strict VBox status code.
2698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2699 * @param pi16 Where to return the signed word.
2700 */
2701DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2702{
2703 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2704}
2705#endif /* !IEM_WITH_SETJMP */
2706
2707
2708/**
2709 * Fetches the next signed word from the opcode stream, returning automatically
2710 * on failure.
2711 *
2712 * @param a_pi16 Where to return the signed word.
2713 * @remark Implicitly references pVCpu.
2714 */
2715#ifndef IEM_WITH_SETJMP
2716# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2717 do \
2718 { \
2719 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2720 if (rcStrict2 != VINF_SUCCESS) \
2721 return rcStrict2; \
2722 } while (0)
2723#else
2724# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2725#endif
2726
2727#ifndef IEM_WITH_SETJMP
2728
2729/**
2730 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2731 *
2732 * @returns Strict VBox status code.
2733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2734 * @param pu32 Where to return the opcode dword.
2735 */
2736DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2737{
2738 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2739 if (rcStrict == VINF_SUCCESS)
2740 {
2741 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2742# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2743 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2744# else
2745 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2746 pVCpu->iem.s.abOpcode[offOpcode + 1],
2747 pVCpu->iem.s.abOpcode[offOpcode + 2],
2748 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2749# endif
2750 pVCpu->iem.s.offOpcode = offOpcode + 4;
2751 }
2752 else
2753 *pu32 = 0;
2754 return rcStrict;
2755}
2756
2757
2758/**
2759 * Fetches the next opcode dword.
2760 *
2761 * @returns Strict VBox status code.
2762 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2763 * @param pu32 Where to return the opcode double word.
2764 */
2765DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2766{
2767 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2768 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2769 {
2770 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2771# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2772 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2773# else
2774 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2775 pVCpu->iem.s.abOpcode[offOpcode + 1],
2776 pVCpu->iem.s.abOpcode[offOpcode + 2],
2777 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2778# endif
2779 return VINF_SUCCESS;
2780 }
2781 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2782}
2783
2784#else /* IEM_WITH_SETJMP */
2785
2786/**
2787 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2788 *
2789 * @returns The opcode dword.
2790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2791 */
2792DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2793{
2794# ifdef IEM_WITH_CODE_TLB
2795 uint32_t u32;
2796 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2797 return u32;
2798# else
2799 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2800 if (rcStrict == VINF_SUCCESS)
2801 {
2802 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2803 pVCpu->iem.s.offOpcode = offOpcode + 4;
2804# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2805 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2806# else
2807 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2808 pVCpu->iem.s.abOpcode[offOpcode + 1],
2809 pVCpu->iem.s.abOpcode[offOpcode + 2],
2810 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2811# endif
2812 }
2813 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2814# endif
2815}
2816
2817
2818/**
2819 * Fetches the next opcode dword, longjmp on error.
2820 *
2821 * @returns The opcode dword.
2822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2823 */
2824DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2825{
2826# ifdef IEM_WITH_CODE_TLB
2827 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2828 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2829 if (RT_LIKELY( pbBuf != NULL
2830 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2831 {
2832 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2833# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2834 return *(uint32_t const *)&pbBuf[offBuf];
2835# else
2836 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2837 pbBuf[offBuf + 1],
2838 pbBuf[offBuf + 2],
2839 pbBuf[offBuf + 3]);
2840# endif
2841 }
2842# else
2843 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2844 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2845 {
2846 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2847# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2848 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2849# else
2850 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2851 pVCpu->iem.s.abOpcode[offOpcode + 1],
2852 pVCpu->iem.s.abOpcode[offOpcode + 2],
2853 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2854# endif
2855 }
2856# endif
2857 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2858}
2859
2860#endif /* IEM_WITH_SETJMP */
2861
2862
2863/**
2864 * Fetches the next opcode dword, returns automatically on failure.
2865 *
2866 * @param a_pu32 Where to return the opcode dword.
2867 * @remark Implicitly references pVCpu.
2868 */
2869#ifndef IEM_WITH_SETJMP
2870# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2871 do \
2872 { \
2873 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2874 if (rcStrict2 != VINF_SUCCESS) \
2875 return rcStrict2; \
2876 } while (0)
2877#else
2878# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2879#endif
2880
2881#ifndef IEM_WITH_SETJMP
2882
2883/**
2884 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2885 *
2886 * @returns Strict VBox status code.
2887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2888 * @param pu64 Where to return the opcode quad word.
2889 */
2890DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2891{
2892 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2893 if (rcStrict == VINF_SUCCESS)
2894 {
2895 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2896 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2897 pVCpu->iem.s.abOpcode[offOpcode + 1],
2898 pVCpu->iem.s.abOpcode[offOpcode + 2],
2899 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2900 pVCpu->iem.s.offOpcode = offOpcode + 4;
2901 }
2902 else
2903 *pu64 = 0;
2904 return rcStrict;
2905}
2906
2907
2908/**
2909 * Fetches the next opcode dword, zero extending it to a quad word.
2910 *
2911 * @returns Strict VBox status code.
2912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2913 * @param pu64 Where to return the opcode quad word.
2914 */
2915DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2916{
2917 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2918 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2919 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2920
2921 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2922 pVCpu->iem.s.abOpcode[offOpcode + 1],
2923 pVCpu->iem.s.abOpcode[offOpcode + 2],
2924 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2925 pVCpu->iem.s.offOpcode = offOpcode + 4;
2926 return VINF_SUCCESS;
2927}
2928
2929#endif /* !IEM_WITH_SETJMP */
2930
2931
2932/**
2933 * Fetches the next opcode dword and zero extends it to a quad word, returns
2934 * automatically on failure.
2935 *
2936 * @param a_pu64 Where to return the opcode quad word.
2937 * @remark Implicitly references pVCpu.
2938 */
2939#ifndef IEM_WITH_SETJMP
2940# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2941 do \
2942 { \
2943 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2944 if (rcStrict2 != VINF_SUCCESS) \
2945 return rcStrict2; \
2946 } while (0)
2947#else
2948# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2949#endif
2950
2951
2952#ifndef IEM_WITH_SETJMP
2953/**
2954 * Fetches the next signed double word from the opcode stream.
2955 *
2956 * @returns Strict VBox status code.
2957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2958 * @param pi32 Where to return the signed double word.
2959 */
2960DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2961{
2962 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2963}
2964#endif
2965
2966/**
2967 * Fetches the next signed double word from the opcode stream, returning
2968 * automatically on failure.
2969 *
2970 * @param a_pi32 Where to return the signed double word.
2971 * @remark Implicitly references pVCpu.
2972 */
2973#ifndef IEM_WITH_SETJMP
2974# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2975 do \
2976 { \
2977 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2978 if (rcStrict2 != VINF_SUCCESS) \
2979 return rcStrict2; \
2980 } while (0)
2981#else
2982# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2983#endif
2984
2985#ifndef IEM_WITH_SETJMP
2986
2987/**
2988 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2989 *
2990 * @returns Strict VBox status code.
2991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2992 * @param pu64 Where to return the opcode qword.
2993 */
2994DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2995{
2996 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2997 if (rcStrict == VINF_SUCCESS)
2998 {
2999 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3000 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3001 pVCpu->iem.s.abOpcode[offOpcode + 1],
3002 pVCpu->iem.s.abOpcode[offOpcode + 2],
3003 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3004 pVCpu->iem.s.offOpcode = offOpcode + 4;
3005 }
3006 else
3007 *pu64 = 0;
3008 return rcStrict;
3009}
3010
3011
3012/**
3013 * Fetches the next opcode dword, sign extending it into a quad word.
3014 *
3015 * @returns Strict VBox status code.
3016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3017 * @param pu64 Where to return the opcode quad word.
3018 */
3019DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3020{
3021 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3022 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3023 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3024
3025 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3026 pVCpu->iem.s.abOpcode[offOpcode + 1],
3027 pVCpu->iem.s.abOpcode[offOpcode + 2],
3028 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3029 *pu64 = i32;
3030 pVCpu->iem.s.offOpcode = offOpcode + 4;
3031 return VINF_SUCCESS;
3032}
3033
3034#endif /* !IEM_WITH_SETJMP */
3035
3036
3037/**
3038 * Fetches the next opcode double word and sign extends it to a quad word,
3039 * returns automatically on failure.
3040 *
3041 * @param a_pu64 Where to return the opcode quad word.
3042 * @remark Implicitly references pVCpu.
3043 */
3044#ifndef IEM_WITH_SETJMP
3045# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3046 do \
3047 { \
3048 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3049 if (rcStrict2 != VINF_SUCCESS) \
3050 return rcStrict2; \
3051 } while (0)
3052#else
3053# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3054#endif
3055
3056#ifndef IEM_WITH_SETJMP
3057
3058/**
3059 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3060 *
3061 * @returns Strict VBox status code.
3062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3063 * @param pu64 Where to return the opcode qword.
3064 */
3065DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3066{
3067 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3068 if (rcStrict == VINF_SUCCESS)
3069 {
3070 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3071# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3072 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3073# else
3074 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3075 pVCpu->iem.s.abOpcode[offOpcode + 1],
3076 pVCpu->iem.s.abOpcode[offOpcode + 2],
3077 pVCpu->iem.s.abOpcode[offOpcode + 3],
3078 pVCpu->iem.s.abOpcode[offOpcode + 4],
3079 pVCpu->iem.s.abOpcode[offOpcode + 5],
3080 pVCpu->iem.s.abOpcode[offOpcode + 6],
3081 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3082# endif
3083 pVCpu->iem.s.offOpcode = offOpcode + 8;
3084 }
3085 else
3086 *pu64 = 0;
3087 return rcStrict;
3088}
3089
3090
3091/**
3092 * Fetches the next opcode qword.
3093 *
3094 * @returns Strict VBox status code.
3095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3096 * @param pu64 Where to return the opcode qword.
3097 */
3098DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3099{
3100 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3101 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3102 {
3103# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3104 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3105# else
3106 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3107 pVCpu->iem.s.abOpcode[offOpcode + 1],
3108 pVCpu->iem.s.abOpcode[offOpcode + 2],
3109 pVCpu->iem.s.abOpcode[offOpcode + 3],
3110 pVCpu->iem.s.abOpcode[offOpcode + 4],
3111 pVCpu->iem.s.abOpcode[offOpcode + 5],
3112 pVCpu->iem.s.abOpcode[offOpcode + 6],
3113 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3114# endif
3115 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3116 return VINF_SUCCESS;
3117 }
3118 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3119}
3120
3121#else /* IEM_WITH_SETJMP */
3122
3123/**
3124 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3125 *
3126 * @returns The opcode qword.
3127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3128 */
3129DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3130{
3131# ifdef IEM_WITH_CODE_TLB
3132 uint64_t u64;
3133 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3134 return u64;
3135# else
3136 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3137 if (rcStrict == VINF_SUCCESS)
3138 {
3139 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3140 pVCpu->iem.s.offOpcode = offOpcode + 8;
3141# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3142 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3143# else
3144 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3145 pVCpu->iem.s.abOpcode[offOpcode + 1],
3146 pVCpu->iem.s.abOpcode[offOpcode + 2],
3147 pVCpu->iem.s.abOpcode[offOpcode + 3],
3148 pVCpu->iem.s.abOpcode[offOpcode + 4],
3149 pVCpu->iem.s.abOpcode[offOpcode + 5],
3150 pVCpu->iem.s.abOpcode[offOpcode + 6],
3151 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3152# endif
3153 }
3154 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3155# endif
3156}
3157
3158
3159/**
3160 * Fetches the next opcode qword, longjmp on error.
3161 *
3162 * @returns The opcode qword.
3163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3164 */
3165DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3166{
3167# ifdef IEM_WITH_CODE_TLB
3168 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3169 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3170 if (RT_LIKELY( pbBuf != NULL
3171 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3172 {
3173 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3174# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3175 return *(uint64_t const *)&pbBuf[offBuf];
3176# else
3177 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3178 pbBuf[offBuf + 1],
3179 pbBuf[offBuf + 2],
3180 pbBuf[offBuf + 3],
3181 pbBuf[offBuf + 4],
3182 pbBuf[offBuf + 5],
3183 pbBuf[offBuf + 6],
3184 pbBuf[offBuf + 7]);
3185# endif
3186 }
3187# else
3188 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3189 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3190 {
3191 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3192# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3193 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3194# else
3195 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3196 pVCpu->iem.s.abOpcode[offOpcode + 1],
3197 pVCpu->iem.s.abOpcode[offOpcode + 2],
3198 pVCpu->iem.s.abOpcode[offOpcode + 3],
3199 pVCpu->iem.s.abOpcode[offOpcode + 4],
3200 pVCpu->iem.s.abOpcode[offOpcode + 5],
3201 pVCpu->iem.s.abOpcode[offOpcode + 6],
3202 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3203# endif
3204 }
3205# endif
3206 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3207}
3208
3209#endif /* IEM_WITH_SETJMP */
3210
3211/**
3212 * Fetches the next opcode quad word, returns automatically on failure.
3213 *
3214 * @param a_pu64 Where to return the opcode quad word.
3215 * @remark Implicitly references pVCpu.
3216 */
3217#ifndef IEM_WITH_SETJMP
3218# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3219 do \
3220 { \
3221 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3222 if (rcStrict2 != VINF_SUCCESS) \
3223 return rcStrict2; \
3224 } while (0)
3225#else
3226# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3227#endif
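/*
 * Usage sketch for the IEM_OPCODE_GET_NEXT_* family (the helper below is made
 * up purely for illustration, it is not part of the decoder): the macros
 * implicitly reference the local pVCpu and, in the non-setjmp build, return
 * the fetch status from the caller on failure, so the caller itself must
 * return a VBOXSTRICTRC.
 *
 *      IEM_STATIC VBOXSTRICTRC iemExampleFetchImmediates(PVMCPU pVCpu)
 *      {
 *          uint64_t u64Imm;
 *          IEM_OPCODE_GET_NEXT_U64(&u64Imm);           // e.g. a full imm64 operand
 *          uint64_t u64SxImm;
 *          IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64SxImm);  // e.g. an imm32 sign-extended to 64 bits
 *          return VINF_SUCCESS;
 *      }
 */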
3228
3229
3230/** @name Misc Worker Functions.
3231 * @{
3232 */
3233
3234/**
3235 * Gets the exception class for the specified exception vector.
3236 *
3237 * @returns The class of the specified exception.
3238 * @param uVector The exception vector.
3239 */
3240IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3241{
3242 Assert(uVector <= X86_XCPT_LAST);
3243 switch (uVector)
3244 {
3245 case X86_XCPT_DE:
3246 case X86_XCPT_TS:
3247 case X86_XCPT_NP:
3248 case X86_XCPT_SS:
3249 case X86_XCPT_GP:
3250 case X86_XCPT_SX: /* AMD only */
3251 return IEMXCPTCLASS_CONTRIBUTORY;
3252
3253 case X86_XCPT_PF:
3254 case X86_XCPT_VE: /* Intel only */
3255 return IEMXCPTCLASS_PAGE_FAULT;
3256 }
3257 return IEMXCPTCLASS_BENIGN;
3258}
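/*
 * A few concrete classifications from the switch above: #GP, #TS and #SS are
 * contributory, #PF (and Intel's #VE) is the page-fault class, and everything
 * else, e.g. #DB, #BP or #UD, falls through to benign.
 */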
3259
3260
3261/**
3262 * Evaluates how to handle an exception caused during delivery of another event
3263 * (exception / interrupt).
3264 *
3265 * @returns How to handle the recursive exception.
3266 * @param pVCpu The cross context virtual CPU structure of the
3267 * calling thread.
3268 * @param fPrevFlags The flags of the previous event.
3269 * @param uPrevVector The vector of the previous event.
3270 * @param fCurFlags The flags of the current exception.
3271 * @param uCurVector The vector of the current exception.
3272 * @param pfXcptRaiseInfo Where to store additional information about the
3273 * exception condition. Optional.
3274 */
3275VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3276 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3277{
3278 /*
3279 * Only CPU exceptions can be raised while delivering other events; exceptions generated by
3280 * software interrupts (INTn/INT3/INTO/ICEBP) cannot occur as the current (second) exception.
3281 */
3282 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3283 Assert(pVCpu); RT_NOREF(pVCpu);
3284
3285 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3286 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3287 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3288 {
3289 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3290 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3291 {
3292 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3293 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3294 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3295 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3296 {
3297 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3298 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3299 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3300 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3301 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3302 }
3303 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3304 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3305 {
3306 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3307 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%u uCurVector=%u -> #DF\n", uPrevVector, uCurVector));
3308 }
3309 else if ( uPrevVector == X86_XCPT_DF
3310 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3311 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3312 {
3313 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3314 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3315 }
3316 }
3317 else
3318 {
3319 if (uPrevVector == X86_XCPT_NMI)
3320 {
3321 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3322 if (uCurVector == X86_XCPT_PF)
3323 {
3324 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3325 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3326 }
3327 }
3328 else if ( uPrevVector == X86_XCPT_AC
3329 && uCurVector == X86_XCPT_AC)
3330 {
3331 enmRaise = IEMXCPTRAISE_CPU_HANG;
3332 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3333 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3334 }
3335 }
3336 }
3337 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3338 {
3339 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3340 if (uCurVector == X86_XCPT_PF)
3341 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3342 }
3343 else
3344 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3345
3346 if (pfXcptRaiseInfo)
3347 *pfXcptRaiseInfo = fRaiseInfo;
3348 return enmRaise;
3349}
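/*
 * Worked example (the call below is illustrative, not lifted from a caller):
 * a #GP raised while a #PF is being delivered pairs a page-fault class event
 * with a contributory one and thus escalates to a double fault:
 *
 *      IEMXCPTRAISEINFO fXcptInfo;
 *      IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                       &fXcptInfo);
 *      // enmRaise  == IEMXCPTRAISE_DOUBLE_FAULT
 *      // fXcptInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT
 */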
3350
3351
3352/**
3353 * Enters the CPU shutdown state initiated by a triple fault or other
3354 * unrecoverable conditions.
3355 *
3356 * @returns Strict VBox status code.
3357 * @param pVCpu The cross context virtual CPU structure of the
3358 * calling thread.
3359 */
3360IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3361{
3362 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3363 {
3364 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3365 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3366 }
3367
3368 RT_NOREF(pVCpu);
3369 return VINF_EM_TRIPLE_FAULT;
3370}
3371
3372
3373#ifdef VBOX_WITH_NESTED_HWVIRT
3374IEM_STATIC VBOXSTRICTRC iemHandleSvmNstGstEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
3375 uint32_t uErr, uint64_t uCr2)
3376{
3377 Assert(IEM_IS_SVM_ENABLED(pVCpu));
3378
3379 /*
3380 * Handle nested-guest SVM exception and software interrupt intercepts,
3381 * see AMD spec. 15.12 "Exception Intercepts".
3382 *
3383 * - NMI intercepts have their own exit code and do not cause SVM_EXIT_EXCEPTION_2 #VMEXITs.
3384 * - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
3385 * even when they use a vector in the range 0 to 31.
3386 * - ICEBP should not trigger #DB intercept, but its own intercept.
3387 * - For #PF exceptions, its intercept is checked before CR2 is written by the exception.
3388 */
3389 /* Check NMI intercept */
3390 if ( u8Vector == X86_XCPT_NMI
3391 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3392 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
3393 {
3394 Log2(("iemHandleSvmNstGstEventIntercept: NMI intercept -> #VMEXIT\n"));
3395 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3396 }
3397
3398 /* Check ICEBP intercept. */
3399 if ( (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
3400 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_ICEBP))
3401 {
3402 Log2(("iemHandleSvmNstGstEventIntercept: ICEBP intercept -> #VMEXIT\n"));
3403 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_ICEBP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3404 }
3405
3406 /* Check CPU exception intercepts. */
3407 if ( (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3408 && IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, u8Vector))
3409 {
3410 Assert(u8Vector <= X86_XCPT_LAST);
3411 uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
3412 uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
3413 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist
3414 && u8Vector == X86_XCPT_PF
3415 && !(uErr & X86_TRAP_PF_ID))
3416 {
3417 /** @todo Nested-guest SVM - figure out fetching op-code bytes from IEM. */
3418#ifdef IEM_WITH_CODE_TLB
3419#else
3420 uint8_t const offOpCode = pVCpu->iem.s.offOpcode;
3421 uint8_t const cbCurrent = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode;
3422 if ( cbCurrent > 0
3423 && cbCurrent < sizeof(pCtx->hwvirt.svm.VmcbCtrl.abInstr))
3424 {
3425 Assert(cbCurrent <= sizeof(pVCpu->iem.s.abOpcode));
3426 memcpy(&pCtx->hwvirt.svm.VmcbCtrl.abInstr[0], &pVCpu->iem.s.abOpcode[offOpCode], cbCurrent);
3427 }
3428#endif
3429 }
3430 Log2(("iemHandleSvmNstGstEventIntercept: Xcpt intercept. u8Vector=%#x uExitInfo1=%#RX64, uExitInfo2=%#RX64 -> #VMEXIT\n",
3431 u8Vector, uExitInfo1, uExitInfo2));
3432 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + u8Vector, uExitInfo1, uExitInfo2);
3433 }
3434
3435 /* Check software interrupt (INTn) intercepts. */
3436 if ( (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3437 | IEM_XCPT_FLAGS_BP_INSTR
3438 | IEM_XCPT_FLAGS_ICEBP_INSTR
3439 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3440 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN))
3441 {
3442 uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? u8Vector : 0;
3443 Log2(("iemHandleSvmNstGstEventIntercept: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
3444 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
3445 }
3446
3447 return VINF_HM_INTERCEPT_NOT_ACTIVE;
3448}
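/*
 * Concrete example of the exception intercept path above (values illustrative):
 * an intercepted #PF (vector 14) produces a #VMEXIT with exit code
 * SVM_EXIT_EXCEPTION_0 + 14, uExitInfo1 carrying the page-fault error code
 * (IEM_XCPT_FLAGS_ERR) and uExitInfo2 the faulting address (IEM_XCPT_FLAGS_CR2).
 * NMIs, ICEBP and INTn never reach that check; they use their own exit codes
 * (SVM_EXIT_NMI, SVM_EXIT_ICEBP, SVM_EXIT_SWINT) as handled earlier in this
 * function.
 */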
3449#endif
3450
3451/**
3452 * Validates a new SS segment.
3453 *
3454 * @returns VBox strict status code.
3455 * @param pVCpu The cross context virtual CPU structure of the
3456 * calling thread.
3457 * @param pCtx The CPU context.
3458 * @param NewSS The new SS selector.
3459 * @param uCpl The CPL to load the stack for.
3460 * @param pDesc Where to return the descriptor.
3461 */
3462IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3463{
3464 NOREF(pCtx);
3465
3466 /* Null selectors are not allowed (we're not called for dispatching
3467 interrupts with SS=0 in long mode). */
3468 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3469 {
3470 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3471 return iemRaiseTaskSwitchFault0(pVCpu);
3472 }
3473
3474 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3475 if ((NewSS & X86_SEL_RPL) != uCpl)
3476 {
3477 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3478 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3479 }
3480
3481 /*
3482 * Read the descriptor.
3483 */
3484 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3485 if (rcStrict != VINF_SUCCESS)
3486 return rcStrict;
3487
3488 /*
3489 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3490 */
3491 if (!pDesc->Legacy.Gen.u1DescType)
3492 {
3493 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3494 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3495 }
3496
3497 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3498 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3499 {
3500 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3501 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3502 }
3503 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3504 {
3505 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3506 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3507 }
3508
3509 /* Is it there? */
3510 /** @todo testcase: Is this checked before the canonical / limit check below? */
3511 if (!pDesc->Legacy.Gen.u1Present)
3512 {
3513 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3514 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3515 }
3516
3517 return VINF_SUCCESS;
3518}
3519
3520
3521/**
3522 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3523 * not.
3524 *
3525 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3526 * @param a_pCtx The CPU context.
3527 */
3528#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3529# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3530 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3531 ? (a_pCtx)->eflags.u \
3532 : CPUMRawGetEFlags(a_pVCpu) )
3533#else
3534# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3535 ( (a_pCtx)->eflags.u )
3536#endif
3537
3538/**
3539 * Updates the EFLAGS in the correct manner wrt. PATM.
3540 *
3541 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3542 * @param a_pCtx The CPU context.
3543 * @param a_fEfl The new EFLAGS.
3544 */
3545#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3546# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3547 do { \
3548 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3549 (a_pCtx)->eflags.u = (a_fEfl); \
3550 else \
3551 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3552 } while (0)
3553#else
3554# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3555 do { \
3556 (a_pCtx)->eflags.u = (a_fEfl); \
3557 } while (0)
3558#endif
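/*
 * Typical read-modify-write pattern with the two macros above; this mirrors
 * what the real-mode exception dispatcher further down does when clearing IF:
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;
 *      IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 *
 * Going through the macros instead of touching pCtx->eflags.u directly keeps
 * raw-mode (PATM) configurations seeing the correct, unpatched flag state.
 */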
3559
3560
3561/** @} */
3562
3563/** @name Raising Exceptions.
3564 *
3565 * @{
3566 */
3567
3568
3569/**
3570 * Loads the specified stack far pointer from the TSS.
3571 *
3572 * @returns VBox strict status code.
3573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3574 * @param pCtx The CPU context.
3575 * @param uCpl The CPL to load the stack for.
3576 * @param pSelSS Where to return the new stack segment.
3577 * @param puEsp Where to return the new stack pointer.
3578 */
3579IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3580 PRTSEL pSelSS, uint32_t *puEsp)
3581{
3582 VBOXSTRICTRC rcStrict;
3583 Assert(uCpl < 4);
3584
3585 switch (pCtx->tr.Attr.n.u4Type)
3586 {
3587 /*
3588 * 16-bit TSS (X86TSS16).
3589 */
3590 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); /* fall thru */
3591 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3592 {
3593 uint32_t off = uCpl * 4 + 2;
3594 if (off + 4 <= pCtx->tr.u32Limit)
3595 {
3596 /** @todo check actual access pattern here. */
3597 uint32_t u32Tmp = 0; /* gcc maybe... */
3598 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3599 if (rcStrict == VINF_SUCCESS)
3600 {
3601 *puEsp = RT_LOWORD(u32Tmp);
3602 *pSelSS = RT_HIWORD(u32Tmp);
3603 return VINF_SUCCESS;
3604 }
3605 }
3606 else
3607 {
3608 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3609 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3610 }
3611 break;
3612 }
3613
3614 /*
3615 * 32-bit TSS (X86TSS32).
3616 */
3617 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); /* fall thru */
3618 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3619 {
3620 uint32_t off = uCpl * 8 + 4;
3621 if (off + 7 <= pCtx->tr.u32Limit)
3622 {
3623/** @todo check actual access pattern here. */
3624 uint64_t u64Tmp;
3625 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3626 if (rcStrict == VINF_SUCCESS)
3627 {
3628 *puEsp = u64Tmp & UINT32_MAX;
3629 *pSelSS = (RTSEL)(u64Tmp >> 32);
3630 return VINF_SUCCESS;
3631 }
3632 }
3633 else
3634 {
3635 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3636 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3637 }
3638 break;
3639 }
3640
3641 default:
3642 AssertFailed();
3643 rcStrict = VERR_IEM_IPE_4;
3644 break;
3645 }
3646
3647 *puEsp = 0; /* make gcc happy */
3648 *pSelSS = 0; /* make gcc happy */
3649 return rcStrict;
3650}
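/*
 * Worked example of the offset math above (uCpl=1): a 32-bit TSS keeps its
 * ring-1 stack in the esp1/ss1 slots, so the qword is fetched from
 * tr.u64Base + 1*8 + 4 = base + 12 and split into ESP (low dword) and SS
 * (selector in the upper half); a 16-bit TSS keeps sp1/ss1 at
 * base + 1*4 + 2 = base + 6, split into SP (low word) and SS (high word).
 */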
3651
3652
3653/**
3654 * Loads the specified stack pointer from the 64-bit TSS.
3655 *
3656 * @returns VBox strict status code.
3657 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3658 * @param pCtx The CPU context.
3659 * @param uCpl The CPL to load the stack for.
3660 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3661 * @param puRsp Where to return the new stack pointer.
3662 */
3663IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3664{
3665 Assert(uCpl < 4);
3666 Assert(uIst < 8);
3667 *puRsp = 0; /* make gcc happy */
3668
3669 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3670
3671 uint32_t off;
3672 if (uIst)
3673 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3674 else
3675 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3676 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3677 {
3678 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3679 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3680 }
3681
3682 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3683}
3684
3685
3686/**
3687 * Adjusts the CPU state according to the exception being raised.
3688 *
3689 * @param pCtx The CPU context.
3690 * @param u8Vector The exception that has been raised.
3691 */
3692DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3693{
3694 switch (u8Vector)
3695 {
3696 case X86_XCPT_DB:
3697 pCtx->dr[7] &= ~X86_DR7_GD;
3698 break;
3699 /** @todo Read the AMD and Intel exception reference... */
3700 }
3701}
3702
3703
3704/**
3705 * Implements exceptions and interrupts for real mode.
3706 *
3707 * @returns VBox strict status code.
3708 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3709 * @param pCtx The CPU context.
3710 * @param cbInstr The number of bytes to offset rIP by in the return
3711 * address.
3712 * @param u8Vector The interrupt / exception vector number.
3713 * @param fFlags The flags.
3714 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3715 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3716 */
3717IEM_STATIC VBOXSTRICTRC
3718iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3719 PCPUMCTX pCtx,
3720 uint8_t cbInstr,
3721 uint8_t u8Vector,
3722 uint32_t fFlags,
3723 uint16_t uErr,
3724 uint64_t uCr2)
3725{
3726 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3727 NOREF(uErr); NOREF(uCr2);
3728
3729 /*
3730 * Read the IDT entry.
3731 */
3732 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3733 {
3734 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3735 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3736 }
3737 RTFAR16 Idte;
3738 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3739 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3740 return rcStrict;
3741
3742 /*
3743 * Push the stack frame.
3744 */
3745 uint16_t *pu16Frame;
3746 uint64_t uNewRsp;
3747 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3748 if (rcStrict != VINF_SUCCESS)
3749 return rcStrict;
3750
3751 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3752#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3753 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3754 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3755 fEfl |= UINT16_C(0xf000);
3756#endif
3757 pu16Frame[2] = (uint16_t)fEfl;
3758 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3759 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3760 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3761 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3762 return rcStrict;
3763
3764 /*
3765 * Load the vector address into cs:ip and make exception specific state
3766 * adjustments.
3767 */
3768 pCtx->cs.Sel = Idte.sel;
3769 pCtx->cs.ValidSel = Idte.sel;
3770 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3771 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3772 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3773 pCtx->rip = Idte.off;
3774 fEfl &= ~X86_EFL_IF;
3775 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3776
3777 /** @todo do we actually do this in real mode? */
3778 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3779 iemRaiseXcptAdjustState(pCtx, u8Vector);
3780
3781 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3782}
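/*
 * Worked example of the real-mode dispatch above (e.g. vector 0x08): the IVT
 * entry is the 4-byte offset:segment pair read from idtr.pIdt + 4 * 0x08 =
 * pIdt + 0x20; the 6-byte frame pushed on the stack holds, from higher to
 * lower addresses, FLAGS, CS and the return IP; execution then resumes at
 * Idte.sel:Idte.off with CS.base = Idte.sel << 4 and IF cleared.
 */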
3783
3784
3785/**
3786 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3787 *
3788 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3789 * @param pSReg Pointer to the segment register.
3790 */
3791IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3792{
3793 pSReg->Sel = 0;
3794 pSReg->ValidSel = 0;
3795 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3796 {
3797 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
3798 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3799 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3800 }
3801 else
3802 {
3803 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3804 /** @todo check this on AMD-V */
3805 pSReg->u64Base = 0;
3806 pSReg->u32Limit = 0;
3807 }
3808}
3809
3810
3811/**
3812 * Loads a segment selector during a task switch in V8086 mode.
3813 *
3814 * @param pSReg Pointer to the segment register.
3815 * @param uSel The selector value to load.
3816 */
3817IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3818{
3819 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3820 pSReg->Sel = uSel;
3821 pSReg->ValidSel = uSel;
3822 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3823 pSReg->u64Base = uSel << 4;
3824 pSReg->u32Limit = 0xffff;
3825 pSReg->Attr.u = 0xf3;
3826}
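/*
 * Example of the load above: uSel=0x1234 yields base 0x12340 (uSel << 4),
 * limit 0xffff and attributes 0xf3, i.e. a present, DPL=3, accessed
 * read/write data segment; that is all a segment register can describe in
 * virtual-8086 mode.
 */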
3827
3828
3829/**
3830 * Loads a NULL data selector into a selector register, both the hidden and
3831 * visible parts, in protected mode.
3832 *
3833 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3834 * @param pSReg Pointer to the segment register.
3835 * @param uRpl The RPL.
3836 */
3837IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3838{
3839 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3840 * data selector in protected mode. */
3841 pSReg->Sel = uRpl;
3842 pSReg->ValidSel = uRpl;
3843 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3844 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3845 {
3846 /* VT-x (Intel 3960x) observed doing something like this. */
3847 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3848 pSReg->u32Limit = UINT32_MAX;
3849 pSReg->u64Base = 0;
3850 }
3851 else
3852 {
3853 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3854 pSReg->u32Limit = 0;
3855 pSReg->u64Base = 0;
3856 }
3857}
3858
3859
3860/**
3861 * Loads a segment selector during a task switch in protected mode.
3862 *
3863 * In this task switch scenario, we would throw \#TS exceptions rather than
3864 * \#GPs.
3865 *
3866 * @returns VBox strict status code.
3867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3868 * @param pSReg Pointer to the segment register.
3869 * @param uSel The new selector value.
3870 *
3871 * @remarks This does _not_ handle CS or SS.
3872 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3873 */
3874IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3875{
3876 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3877
3878 /* Null data selector. */
3879 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3880 {
3881 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3882 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3883 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3884 return VINF_SUCCESS;
3885 }
3886
3887 /* Fetch the descriptor. */
3888 IEMSELDESC Desc;
3889 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3890 if (rcStrict != VINF_SUCCESS)
3891 {
3892 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3893 VBOXSTRICTRC_VAL(rcStrict)));
3894 return rcStrict;
3895 }
3896
3897 /* Must be a data segment or readable code segment. */
3898 if ( !Desc.Legacy.Gen.u1DescType
3899 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3900 {
3901 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3902 Desc.Legacy.Gen.u4Type));
3903 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3904 }
3905
3906 /* Check privileges for data segments and non-conforming code segments. */
3907 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3908 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3909 {
3910 /* The RPL and the new CPL must be less than or equal to the DPL. */
3911 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3912 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3913 {
3914 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3915 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3916 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3917 }
3918 }
3919
3920 /* Is it there? */
3921 if (!Desc.Legacy.Gen.u1Present)
3922 {
3923 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3924 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3925 }
3926
3927 /* The base and limit. */
3928 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3929 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3930
3931 /*
3932 * Ok, everything checked out fine. Now set the accessed bit before
3933 * committing the result into the registers.
3934 */
3935 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3936 {
3937 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3938 if (rcStrict != VINF_SUCCESS)
3939 return rcStrict;
3940 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3941 }
3942
3943 /* Commit */
3944 pSReg->Sel = uSel;
3945 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3946 pSReg->u32Limit = cbLimit;
3947 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3948 pSReg->ValidSel = uSel;
3949 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3950 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3951 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3952
3953 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3954 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3955 return VINF_SUCCESS;
3956}
3957
3958
3959/**
3960 * Performs a task switch.
3961 *
3962 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3963 * caller is responsible for performing the necessary checks (like DPL, TSS
3964 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3965 * reference for JMP, CALL, IRET.
3966 *
3967 * If the task switch is due to a software interrupt or hardware exception,
3968 * the caller is responsible for validating the TSS selector and descriptor. See
3969 * Intel Instruction reference for INT n.
3970 *
3971 * @returns VBox strict status code.
3972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3973 * @param pCtx The CPU context.
3974 * @param enmTaskSwitch What caused this task switch.
3975 * @param uNextEip The EIP effective after the task switch.
3976 * @param fFlags The flags.
3977 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3978 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3979 * @param SelTSS The TSS selector of the new task.
3980 * @param pNewDescTSS Pointer to the new TSS descriptor.
3981 */
3982IEM_STATIC VBOXSTRICTRC
3983iemTaskSwitch(PVMCPU pVCpu,
3984 PCPUMCTX pCtx,
3985 IEMTASKSWITCH enmTaskSwitch,
3986 uint32_t uNextEip,
3987 uint32_t fFlags,
3988 uint16_t uErr,
3989 uint64_t uCr2,
3990 RTSEL SelTSS,
3991 PIEMSELDESC pNewDescTSS)
3992{
3993 Assert(!IEM_IS_REAL_MODE(pVCpu));
3994 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3995
3996 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3997 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3998 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3999 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4000 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4001
4002 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4003 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4004
4005 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4006 fIsNewTSS386, pCtx->eip, uNextEip));
4007
4008 /* Update CR2 in case it's a page-fault. */
4009 /** @todo This should probably be done much earlier in IEM/PGM. See
4010 * @bugref{5653#c49}. */
4011 if (fFlags & IEM_XCPT_FLAGS_CR2)
4012 pCtx->cr2 = uCr2;
4013
4014 /*
4015 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4016 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4017 */
4018 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4019 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4020 if (uNewTSSLimit < uNewTSSLimitMin)
4021 {
4022 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4023 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4024 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4025 }
4026
4027 /*
4028 * Check the current TSS limit. The last written byte to the current TSS during the
4029 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4030 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4031 *
4032 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4033 * end up with smaller than "legal" TSS limits.
4034 */
4035 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
4036 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4037 if (uCurTSSLimit < uCurTSSLimitMin)
4038 {
4039 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4040 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4041 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4042 }
4043
4044 /*
4045 * Verify that the new TSS can be accessed and map it. Map only the required contents
4046 * and not the entire TSS.
4047 */
4048 void *pvNewTSS;
4049 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4050 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4051 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4052 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4053 * not perform correct translation if this happens. See Intel spec. 7.2.1
4054 * "Task-State Segment" */
4055 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4056 if (rcStrict != VINF_SUCCESS)
4057 {
4058 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4059 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4060 return rcStrict;
4061 }
4062
4063 /*
4064 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4065 */
4066 uint32_t u32EFlags = pCtx->eflags.u32;
4067 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4068 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4069 {
4070 PX86DESC pDescCurTSS;
4071 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4072 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4073 if (rcStrict != VINF_SUCCESS)
4074 {
4075 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4076 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4077 return rcStrict;
4078 }
4079
4080 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4081 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4082 if (rcStrict != VINF_SUCCESS)
4083 {
4084 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4085 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4086 return rcStrict;
4087 }
4088
4089 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4090 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4091 {
4092 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4093 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4094 u32EFlags &= ~X86_EFL_NT;
4095 }
4096 }
4097
4098 /*
4099 * Save the CPU state into the current TSS.
4100 */
4101 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4102 if (GCPtrNewTSS == GCPtrCurTSS)
4103 {
4104 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4105 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4106 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4107 }
4108 if (fIsNewTSS386)
4109 {
4110 /*
4111 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4112 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4113 */
4114 void *pvCurTSS32;
4115 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4116 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4117 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4118 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4119 if (rcStrict != VINF_SUCCESS)
4120 {
4121 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4122 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4123 return rcStrict;
4124 }
4125
4126 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
4127 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4128 pCurTSS32->eip = uNextEip;
4129 pCurTSS32->eflags = u32EFlags;
4130 pCurTSS32->eax = pCtx->eax;
4131 pCurTSS32->ecx = pCtx->ecx;
4132 pCurTSS32->edx = pCtx->edx;
4133 pCurTSS32->ebx = pCtx->ebx;
4134 pCurTSS32->esp = pCtx->esp;
4135 pCurTSS32->ebp = pCtx->ebp;
4136 pCurTSS32->esi = pCtx->esi;
4137 pCurTSS32->edi = pCtx->edi;
4138 pCurTSS32->es = pCtx->es.Sel;
4139 pCurTSS32->cs = pCtx->cs.Sel;
4140 pCurTSS32->ss = pCtx->ss.Sel;
4141 pCurTSS32->ds = pCtx->ds.Sel;
4142 pCurTSS32->fs = pCtx->fs.Sel;
4143 pCurTSS32->gs = pCtx->gs.Sel;
4144
4145 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4146 if (rcStrict != VINF_SUCCESS)
4147 {
4148 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4149 VBOXSTRICTRC_VAL(rcStrict)));
4150 return rcStrict;
4151 }
4152 }
4153 else
4154 {
4155 /*
4156 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4157 */
4158 void *pvCurTSS16;
4159 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4160 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4161 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4162 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4163 if (rcStrict != VINF_SUCCESS)
4164 {
4165 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4166 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4167 return rcStrict;
4168 }
4169
4170 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
4171 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4172 pCurTSS16->ip = uNextEip;
4173 pCurTSS16->flags = u32EFlags;
4174 pCurTSS16->ax = pCtx->ax;
4175 pCurTSS16->cx = pCtx->cx;
4176 pCurTSS16->dx = pCtx->dx;
4177 pCurTSS16->bx = pCtx->bx;
4178 pCurTSS16->sp = pCtx->sp;
4179 pCurTSS16->bp = pCtx->bp;
4180 pCurTSS16->si = pCtx->si;
4181 pCurTSS16->di = pCtx->di;
4182 pCurTSS16->es = pCtx->es.Sel;
4183 pCurTSS16->cs = pCtx->cs.Sel;
4184 pCurTSS16->ss = pCtx->ss.Sel;
4185 pCurTSS16->ds = pCtx->ds.Sel;
4186
4187 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4188 if (rcStrict != VINF_SUCCESS)
4189 {
4190 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4191 VBOXSTRICTRC_VAL(rcStrict)));
4192 return rcStrict;
4193 }
4194 }
4195
4196 /*
4197 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4198 */
4199 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4200 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4201 {
4202 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4203 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4204 pNewTSS->selPrev = pCtx->tr.Sel;
4205 }
4206
4207 /*
4208 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4209 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4210 */
4211 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4212 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4213 bool fNewDebugTrap;
4214 if (fIsNewTSS386)
4215 {
4216 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4217 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4218 uNewEip = pNewTSS32->eip;
4219 uNewEflags = pNewTSS32->eflags;
4220 uNewEax = pNewTSS32->eax;
4221 uNewEcx = pNewTSS32->ecx;
4222 uNewEdx = pNewTSS32->edx;
4223 uNewEbx = pNewTSS32->ebx;
4224 uNewEsp = pNewTSS32->esp;
4225 uNewEbp = pNewTSS32->ebp;
4226 uNewEsi = pNewTSS32->esi;
4227 uNewEdi = pNewTSS32->edi;
4228 uNewES = pNewTSS32->es;
4229 uNewCS = pNewTSS32->cs;
4230 uNewSS = pNewTSS32->ss;
4231 uNewDS = pNewTSS32->ds;
4232 uNewFS = pNewTSS32->fs;
4233 uNewGS = pNewTSS32->gs;
4234 uNewLdt = pNewTSS32->selLdt;
4235 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4236 }
4237 else
4238 {
4239 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4240 uNewCr3 = 0;
4241 uNewEip = pNewTSS16->ip;
4242 uNewEflags = pNewTSS16->flags;
4243 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4244 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4245 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4246 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4247 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4248 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4249 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4250 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4251 uNewES = pNewTSS16->es;
4252 uNewCS = pNewTSS16->cs;
4253 uNewSS = pNewTSS16->ss;
4254 uNewDS = pNewTSS16->ds;
4255 uNewFS = 0;
4256 uNewGS = 0;
4257 uNewLdt = pNewTSS16->selLdt;
4258 fNewDebugTrap = false;
4259 }
4260
4261 if (GCPtrNewTSS == GCPtrCurTSS)
4262 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4263 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4264
4265 /*
4266 * We're done accessing the new TSS.
4267 */
4268 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4269 if (rcStrict != VINF_SUCCESS)
4270 {
4271 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4272 return rcStrict;
4273 }
4274
4275 /*
4276 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4277 */
4278 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4279 {
4280 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4281 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4282 if (rcStrict != VINF_SUCCESS)
4283 {
4284 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4285 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4286 return rcStrict;
4287 }
4288
4289 /* Check that the descriptor indicates the new TSS is available (not busy). */
4290 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4291 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4292 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4293
4294 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4295 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4296 if (rcStrict != VINF_SUCCESS)
4297 {
4298 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4299 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4300 return rcStrict;
4301 }
4302 }
4303
4304 /*
4305 * From this point on, we're technically in the new task. We will defer exceptions
4306 * until the completion of the task switch but before executing any instructions in the new task.
4307 */
4308 pCtx->tr.Sel = SelTSS;
4309 pCtx->tr.ValidSel = SelTSS;
4310 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4311 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4312 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4313 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4314 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4315
4316 /* Set the busy bit in TR. */
4317 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4318 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4319 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4320 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4321 {
4322 uNewEflags |= X86_EFL_NT;
4323 }
4324
4325 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4326 pCtx->cr0 |= X86_CR0_TS;
4327 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4328
4329 pCtx->eip = uNewEip;
4330 pCtx->eax = uNewEax;
4331 pCtx->ecx = uNewEcx;
4332 pCtx->edx = uNewEdx;
4333 pCtx->ebx = uNewEbx;
4334 pCtx->esp = uNewEsp;
4335 pCtx->ebp = uNewEbp;
4336 pCtx->esi = uNewEsi;
4337 pCtx->edi = uNewEdi;
4338
4339 uNewEflags &= X86_EFL_LIVE_MASK;
4340 uNewEflags |= X86_EFL_RA1_MASK;
4341 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4342
4343 /*
4344 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4345 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4346 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4347 */
4348 pCtx->es.Sel = uNewES;
4349 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4350
4351 pCtx->cs.Sel = uNewCS;
4352 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4353
4354 pCtx->ss.Sel = uNewSS;
4355 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4356
4357 pCtx->ds.Sel = uNewDS;
4358 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4359
4360 pCtx->fs.Sel = uNewFS;
4361 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4362
4363 pCtx->gs.Sel = uNewGS;
4364 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4365 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4366
4367 pCtx->ldtr.Sel = uNewLdt;
4368 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4369 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4370 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4371
4372 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4373 {
4374 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4375 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4376 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4377 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4378 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4379 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4380 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4381 }
4382
4383 /*
4384 * Switch CR3 for the new task.
4385 */
4386 if ( fIsNewTSS386
4387 && (pCtx->cr0 & X86_CR0_PG))
4388 {
4389 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4390 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4391 {
4392 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4393 AssertRCSuccessReturn(rc, rc);
4394 }
4395 else
4396 pCtx->cr3 = uNewCr3;
4397
4398 /* Inform PGM. */
4399 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4400 {
4401 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4402 AssertRCReturn(rc, rc);
4403 /* ignore informational status codes */
4404 }
4405 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4406 }
4407
4408 /*
4409 * Switch LDTR for the new task.
4410 */
4411 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4412 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4413 else
4414 {
4415 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4416
4417 IEMSELDESC DescNewLdt;
4418 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4419 if (rcStrict != VINF_SUCCESS)
4420 {
4421 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4422 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4423 return rcStrict;
4424 }
4425 if ( !DescNewLdt.Legacy.Gen.u1Present
4426 || DescNewLdt.Legacy.Gen.u1DescType
4427 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4428 {
4429 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4430 uNewLdt, DescNewLdt.Legacy.u));
4431 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4432 }
4433
4434 pCtx->ldtr.ValidSel = uNewLdt;
4435 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4436 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4437 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4438 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4439 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4440 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4441 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4442 }
4443
4444 IEMSELDESC DescSS;
4445 if (IEM_IS_V86_MODE(pVCpu))
4446 {
4447 pVCpu->iem.s.uCpl = 3;
4448 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4449 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4450 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4451 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4452 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4453 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4454
4455 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4456 DescSS.Legacy.u = 0;
4457 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4458 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4459 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4460 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4461 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4462 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4463 DescSS.Legacy.Gen.u2Dpl = 3;
4464 }
4465 else
4466 {
4467 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4468
4469 /*
4470 * Load the stack segment for the new task.
4471 */
4472 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4473 {
4474 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4475 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4476 }
4477
4478 /* Fetch the descriptor. */
4479 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4480 if (rcStrict != VINF_SUCCESS)
4481 {
4482 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4483 VBOXSTRICTRC_VAL(rcStrict)));
4484 return rcStrict;
4485 }
4486
4487 /* SS must be a data segment and writable. */
4488 if ( !DescSS.Legacy.Gen.u1DescType
4489 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4490 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4491 {
4492 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4493 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4494 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4495 }
4496
4497 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4498 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4499 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4500 {
4501 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4502 uNewCpl));
4503 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4504 }
4505
4506 /* Is it there? */
4507 if (!DescSS.Legacy.Gen.u1Present)
4508 {
4509 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4510 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4511 }
4512
4513 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4514 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4515
4516 /* Set the accessed bit before committing the result into SS. */
4517 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4518 {
4519 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4520 if (rcStrict != VINF_SUCCESS)
4521 return rcStrict;
4522 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4523 }
4524
4525 /* Commit SS. */
4526 pCtx->ss.Sel = uNewSS;
4527 pCtx->ss.ValidSel = uNewSS;
4528 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4529 pCtx->ss.u32Limit = cbLimit;
4530 pCtx->ss.u64Base = u64Base;
4531 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4532 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4533
4534 /* CPL has changed, update IEM before loading rest of segments. */
4535 pVCpu->iem.s.uCpl = uNewCpl;
4536
4537 /*
4538 * Load the data segments for the new task.
4539 */
4540 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4541 if (rcStrict != VINF_SUCCESS)
4542 return rcStrict;
4543 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4544 if (rcStrict != VINF_SUCCESS)
4545 return rcStrict;
4546 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4547 if (rcStrict != VINF_SUCCESS)
4548 return rcStrict;
4549 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4550 if (rcStrict != VINF_SUCCESS)
4551 return rcStrict;
4552
4553 /*
4554 * Load the code segment for the new task.
4555 */
4556 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4557 {
4558 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4559 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4560 }
4561
4562 /* Fetch the descriptor. */
4563 IEMSELDESC DescCS;
4564 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4565 if (rcStrict != VINF_SUCCESS)
4566 {
4567 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4568 return rcStrict;
4569 }
4570
4571 /* CS must be a code segment. */
4572 if ( !DescCS.Legacy.Gen.u1DescType
4573 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4574 {
4575 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4576 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4577 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4578 }
4579
4580 /* For conforming CS, DPL must be less than or equal to the RPL. */
4581 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4582 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4583 {
4584 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4585 DescCS.Legacy.Gen.u2Dpl));
4586 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4587 }
4588
4589 /* For non-conforming CS, DPL must match RPL. */
4590 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4591 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4592 {
4593 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4594 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4595 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4596 }
4597
4598 /* Is it there? */
4599 if (!DescCS.Legacy.Gen.u1Present)
4600 {
4601 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4602 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4603 }
4604
4605 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4606 u64Base = X86DESC_BASE(&DescCS.Legacy);
4607
4608 /* Set the accessed bit before committing the result into CS. */
4609 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4610 {
4611 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4612 if (rcStrict != VINF_SUCCESS)
4613 return rcStrict;
4614 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4615 }
4616
4617 /* Commit CS. */
4618 pCtx->cs.Sel = uNewCS;
4619 pCtx->cs.ValidSel = uNewCS;
4620 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4621 pCtx->cs.u32Limit = cbLimit;
4622 pCtx->cs.u64Base = u64Base;
4623 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4624 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4625 }
4626
4627 /** @todo Debug trap. */
4628 if (fIsNewTSS386 && fNewDebugTrap)
4629 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4630
4631 /*
4632 * Construct the error code masks based on what caused this task switch.
4633 * See Intel Instruction reference for INT.
4634 */
4635 uint16_t uExt;
4636 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4637 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4638 {
4639 uExt = 1;
4640 }
4641 else
4642 uExt = 0;
4643
4644 /*
4645 * Push any error code on to the new stack.
4646 */
4647 if (fFlags & IEM_XCPT_FLAGS_ERR)
4648 {
4649 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4650 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4651 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
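/* Only the error code gets pushed onto the new task's stack here: a dword for a 32-bit TSS, a word for a 16-bit one. */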
4652
4653 /* Check that there is sufficient space on the stack. */
4654 /** @todo Factor out segment limit checking for normal/expand down segments
4655 * into a separate function. */
4656 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4657 {
4658 if ( pCtx->esp - 1 > cbLimitSS
4659 || pCtx->esp < cbStackFrame)
4660 {
4661 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4662 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4663 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4664 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4665 }
4666 }
4667 else
4668 {
4669 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4670 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4671 {
4672 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4673 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4674 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4675 }
4676 }
4677
4678
4679 if (fIsNewTSS386)
4680 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4681 else
4682 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4683 if (rcStrict != VINF_SUCCESS)
4684 {
4685 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4686 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4687 return rcStrict;
4688 }
4689 }
4690
4691 /* Check the new EIP against the new CS limit. */
4692 if (pCtx->eip > pCtx->cs.u32Limit)
4693 {
4694 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4695 pCtx->eip, pCtx->cs.u32Limit));
4696 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4697 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4698 }
4699
4700 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4701 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4702}
4703
4704
4705/**
4706 * Implements exceptions and interrupts for protected mode.
4707 *
4708 * @returns VBox strict status code.
4709 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4710 * @param pCtx The CPU context.
4711 * @param cbInstr The number of bytes to offset rIP by in the return
4712 * address.
4713 * @param u8Vector The interrupt / exception vector number.
4714 * @param fFlags The flags.
4715 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4716 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4717 */
4718IEM_STATIC VBOXSTRICTRC
4719iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4720 PCPUMCTX pCtx,
4721 uint8_t cbInstr,
4722 uint8_t u8Vector,
4723 uint32_t fFlags,
4724 uint16_t uErr,
4725 uint64_t uCr2)
4726{
4727 /*
4728 * Read the IDT entry.
4729 */
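/* Note: IDT related faults below use an error code with the vector shifted into the selector index field and the IDT bit set (see Intel spec. 6.13 "Error Code"). */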
4730 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4731 {
4732 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4733 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4734 }
4735 X86DESC Idte;
4736 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4737 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4738 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4739 return rcStrict;
4740 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4741 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4742 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4743
4744 /*
4745 * Check the descriptor type, DPL and such.
4746 * ASSUMES this is done in the same order as described for call-gate calls.
4747 */
4748 if (Idte.Gate.u1DescType)
4749 {
4750 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4751 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4752 }
4753 bool fTaskGate = false;
4754 uint8_t f32BitGate = true;
4755 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4756 switch (Idte.Gate.u4Type)
4757 {
4758 case X86_SEL_TYPE_SYS_UNDEFINED:
4759 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4760 case X86_SEL_TYPE_SYS_LDT:
4761 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4762 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4763 case X86_SEL_TYPE_SYS_UNDEFINED2:
4764 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4765 case X86_SEL_TYPE_SYS_UNDEFINED3:
4766 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4767 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4768 case X86_SEL_TYPE_SYS_UNDEFINED4:
4769 {
4770 /** @todo check what actually happens when the type is wrong...
4771 * esp. call gates. */
4772 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4773 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4774 }
4775
4776 case X86_SEL_TYPE_SYS_286_INT_GATE:
4777 f32BitGate = false;
4778 /* fall thru */
4779 case X86_SEL_TYPE_SYS_386_INT_GATE:
4780 fEflToClear |= X86_EFL_IF;
4781 break;
4782
4783 case X86_SEL_TYPE_SYS_TASK_GATE:
4784 fTaskGate = true;
4785#ifndef IEM_IMPLEMENTS_TASKSWITCH
4786 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4787#endif
4788 break;
4789
4790 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4791 f32BitGate = false;
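/* fall thru */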
4792 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4793 break;
4794
4795 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4796 }
4797
4798 /* Check DPL against CPL if applicable. */
4799 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4800 {
4801 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4802 {
4803 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4804 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4805 }
4806 }
4807
4808 /* Is it there? */
4809 if (!Idte.Gate.u1Present)
4810 {
4811 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4812 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4813 }
4814
4815 /* Is it a task-gate? */
4816 if (fTaskGate)
4817 {
4818 /*
4819 * Construct the error code masks based on what caused this task switch.
4820 * See Intel Instruction reference for INT.
4821 */
4822 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4823 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4824 RTSEL SelTSS = Idte.Gate.u16Sel;
4825
4826 /*
4827 * Fetch the TSS descriptor in the GDT.
4828 */
4829 IEMSELDESC DescTSS;
4830 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4831 if (rcStrict != VINF_SUCCESS)
4832 {
4833 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4834 VBOXSTRICTRC_VAL(rcStrict)));
4835 return rcStrict;
4836 }
4837
4838 /* The TSS descriptor must be a system segment and be available (not busy). */
4839 if ( DescTSS.Legacy.Gen.u1DescType
4840 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4841 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4842 {
4843 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4844 u8Vector, SelTSS, DescTSS.Legacy.au64));
4845 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4846 }
4847
4848 /* The TSS must be present. */
4849 if (!DescTSS.Legacy.Gen.u1Present)
4850 {
4851 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4852 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4853 }
4854
4855 /* Do the actual task switch. */
4856 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4857 }
4858
4859 /* A null CS is bad. */
4860 RTSEL NewCS = Idte.Gate.u16Sel;
4861 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4862 {
4863 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4864 return iemRaiseGeneralProtectionFault0(pVCpu);
4865 }
4866
4867 /* Fetch the descriptor for the new CS. */
4868 IEMSELDESC DescCS;
4869 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4870 if (rcStrict != VINF_SUCCESS)
4871 {
4872 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4873 return rcStrict;
4874 }
4875
4876 /* Must be a code segment. */
4877 if (!DescCS.Legacy.Gen.u1DescType)
4878 {
4879 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4880 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4881 }
4882 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4883 {
4884 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4885 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4886 }
4887
4888 /* Don't allow lowering the privilege level. */
4889 /** @todo Does the lowering of privileges apply to software interrupts
4890 * only? This has bearings on the more-privileged or
4891 * same-privilege stack behavior further down. A testcase would
4892 * be nice. */
4893 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4894 {
4895 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4896 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4897 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4898 }
4899
4900 /* Make sure the selector is present. */
4901 if (!DescCS.Legacy.Gen.u1Present)
4902 {
4903 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4904 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4905 }
4906
4907 /* Check the new EIP against the new CS limit. */
4908 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4909 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4910 ? Idte.Gate.u16OffsetLow
4911 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
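/* 286 interrupt/trap gates only supply a 16-bit offset, so the high word is ignored for them. */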
4912 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4913 if (uNewEip > cbLimitCS)
4914 {
4915 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4916 u8Vector, uNewEip, cbLimitCS, NewCS));
4917 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4918 }
4919 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4920
4921 /* Calc the flag image to push. */
4922 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4923 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4924 fEfl &= ~X86_EFL_RF;
4925 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4926 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4927
4928 /* From V8086 mode only go to CPL 0. */
4929 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4930 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
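/* A conforming code segment keeps the current CPL; a non-conforming one runs the handler at its DPL. */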
4931 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4932 {
4933 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4934 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4935 }
4936
4937 /*
4938 * If the privilege level changes, we need to get a new stack from the TSS.
4939 * This in turn means validating the new SS and ESP...
4940 */
4941 if (uNewCpl != pVCpu->iem.s.uCpl)
4942 {
4943 RTSEL NewSS;
4944 uint32_t uNewEsp;
4945 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4946 if (rcStrict != VINF_SUCCESS)
4947 return rcStrict;
4948
4949 IEMSELDESC DescSS;
4950 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4951 if (rcStrict != VINF_SUCCESS)
4952 return rcStrict;
4953 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4954 if (!DescSS.Legacy.Gen.u1DefBig)
4955 {
4956 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4957 uNewEsp = (uint16_t)uNewEsp;
4958 }
4959
4960 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4961
4962 /* Check that there is sufficient space for the stack frame. */
4963 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4964 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4965 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4966 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
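/* 5 words (SS, ESP, EFLAGS, CS, EIP) for a plain privilege change, 9 words when also saving ES/DS/FS/GS on a V86 exit, plus one word for an error code; doubled for 32-bit gates. */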
4967
4968 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4969 {
4970 if ( uNewEsp - 1 > cbLimitSS
4971 || uNewEsp < cbStackFrame)
4972 {
4973 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4974 u8Vector, NewSS, uNewEsp, cbStackFrame));
4975 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4976 }
4977 }
4978 else
4979 {
4980 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4981 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4982 {
4983 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4984 u8Vector, NewSS, uNewEsp, cbStackFrame));
4985 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4986 }
4987 }
4988
4989 /*
4990 * Start making changes.
4991 */
4992
4993 /* Set the new CPL so that stack accesses use it. */
4994 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4995 pVCpu->iem.s.uCpl = uNewCpl;
4996
4997 /* Create the stack frame. */
4998 RTPTRUNION uStackFrame;
4999 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5000 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5001 if (rcStrict != VINF_SUCCESS)
5002 return rcStrict;
5003 void * const pvStackFrame = uStackFrame.pv;
5004 if (f32BitGate)
5005 {
5006 if (fFlags & IEM_XCPT_FLAGS_ERR)
5007 *uStackFrame.pu32++ = uErr;
5008 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
5009 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5010 uStackFrame.pu32[2] = fEfl;
5011 uStackFrame.pu32[3] = pCtx->esp;
5012 uStackFrame.pu32[4] = pCtx->ss.Sel;
5013 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
5014 if (fEfl & X86_EFL_VM)
5015 {
5016 uStackFrame.pu32[1] = pCtx->cs.Sel;
5017 uStackFrame.pu32[5] = pCtx->es.Sel;
5018 uStackFrame.pu32[6] = pCtx->ds.Sel;
5019 uStackFrame.pu32[7] = pCtx->fs.Sel;
5020 uStackFrame.pu32[8] = pCtx->gs.Sel;
5021 }
5022 }
5023 else
5024 {
5025 if (fFlags & IEM_XCPT_FLAGS_ERR)
5026 *uStackFrame.pu16++ = uErr;
5027 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
5028 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5029 uStackFrame.pu16[2] = fEfl;
5030 uStackFrame.pu16[3] = pCtx->sp;
5031 uStackFrame.pu16[4] = pCtx->ss.Sel;
5032 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
5033 if (fEfl & X86_EFL_VM)
5034 {
5035 uStackFrame.pu16[1] = pCtx->cs.Sel;
5036 uStackFrame.pu16[5] = pCtx->es.Sel;
5037 uStackFrame.pu16[6] = pCtx->ds.Sel;
5038 uStackFrame.pu16[7] = pCtx->fs.Sel;
5039 uStackFrame.pu16[8] = pCtx->gs.Sel;
5040 }
5041 }
5042 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5043 if (rcStrict != VINF_SUCCESS)
5044 return rcStrict;
5045
5046 /* Mark the selectors 'accessed' (hope this is the correct time). */
5047 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5048 * after pushing the stack frame? (Write protect the gdt + stack to
5049 * find out.) */
5050 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5051 {
5052 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5053 if (rcStrict != VINF_SUCCESS)
5054 return rcStrict;
5055 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5056 }
5057
5058 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5059 {
5060 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5061 if (rcStrict != VINF_SUCCESS)
5062 return rcStrict;
5063 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5064 }
5065
5066 /*
5067 * Start committing the register changes (joins with the DPL=CPL branch).
5068 */
5069 pCtx->ss.Sel = NewSS;
5070 pCtx->ss.ValidSel = NewSS;
5071 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5072 pCtx->ss.u32Limit = cbLimitSS;
5073 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5074 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5075 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5076 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5077 * SP is loaded).
5078 * Need to check the other combinations too:
5079 * - 16-bit TSS, 32-bit handler
5080 * - 32-bit TSS, 16-bit handler */
5081 if (!pCtx->ss.Attr.n.u1DefBig)
5082 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
5083 else
5084 pCtx->rsp = uNewEsp - cbStackFrame;
5085
5086 if (fEfl & X86_EFL_VM)
5087 {
5088 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
5089 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5090 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5091 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5092 }
5093 }
5094 /*
5095 * Same privilege, no stack change and smaller stack frame.
5096 */
5097 else
5098 {
5099 uint64_t uNewRsp;
5100 RTPTRUNION uStackFrame;
5101 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
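/* EFLAGS, CS and EIP (3 words), plus an optional error code word; doubled for 32-bit gates. */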
5102 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5103 if (rcStrict != VINF_SUCCESS)
5104 return rcStrict;
5105 void * const pvStackFrame = uStackFrame.pv;
5106
5107 if (f32BitGate)
5108 {
5109 if (fFlags & IEM_XCPT_FLAGS_ERR)
5110 *uStackFrame.pu32++ = uErr;
5111 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5112 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5113 uStackFrame.pu32[2] = fEfl;
5114 }
5115 else
5116 {
5117 if (fFlags & IEM_XCPT_FLAGS_ERR)
5118 *uStackFrame.pu16++ = uErr;
5119 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5120 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5121 uStackFrame.pu16[2] = fEfl;
5122 }
5123 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5124 if (rcStrict != VINF_SUCCESS)
5125 return rcStrict;
5126
5127 /* Mark the CS selector as 'accessed'. */
5128 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5129 {
5130 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5131 if (rcStrict != VINF_SUCCESS)
5132 return rcStrict;
5133 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5134 }
5135
5136 /*
5137 * Start committing the register changes (joins with the other branch).
5138 */
5139 pCtx->rsp = uNewRsp;
5140 }
5141
5142 /* ... register committing continues. */
5143 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5144 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5145 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5146 pCtx->cs.u32Limit = cbLimitCS;
5147 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5148 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5149
5150 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5151 fEfl &= ~fEflToClear;
5152 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5153
5154 if (fFlags & IEM_XCPT_FLAGS_CR2)
5155 pCtx->cr2 = uCr2;
5156
5157 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5158 iemRaiseXcptAdjustState(pCtx, u8Vector);
5159
5160 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5161}
5162
5163
5164/**
5165 * Implements exceptions and interrupts for long mode.
5166 *
5167 * @returns VBox strict status code.
5168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5169 * @param pCtx The CPU context.
5170 * @param cbInstr The number of bytes to offset rIP by in the return
5171 * address.
5172 * @param u8Vector The interrupt / exception vector number.
5173 * @param fFlags The flags.
5174 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5175 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5176 */
5177IEM_STATIC VBOXSTRICTRC
5178iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5179 PCPUMCTX pCtx,
5180 uint8_t cbInstr,
5181 uint8_t u8Vector,
5182 uint32_t fFlags,
5183 uint16_t uErr,
5184 uint64_t uCr2)
5185{
5186 /*
5187 * Read the IDT entry.
5188 */
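/* Long mode IDT entries are 16 bytes, hence the shift by 4 below and the two 8-byte fetches. */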
5189 uint16_t offIdt = (uint16_t)u8Vector << 4;
5190 if (pCtx->idtr.cbIdt < offIdt + 7)
5191 {
5192 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5193 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5194 }
5195 X86DESC64 Idte;
5196 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5197 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5198 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5199 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5200 return rcStrict;
5201 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5202 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5203 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5204
5205 /*
5206 * Check the descriptor type, DPL and such.
5207 * ASSUMES this is done in the same order as described for call-gate calls.
5208 */
5209 if (Idte.Gate.u1DescType)
5210 {
5211 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5212 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5213 }
5214 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5215 switch (Idte.Gate.u4Type)
5216 {
5217 case AMD64_SEL_TYPE_SYS_INT_GATE:
5218 fEflToClear |= X86_EFL_IF;
5219 break;
5220 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5221 break;
5222
5223 default:
5224 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5225 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5226 }
5227
5228 /* Check DPL against CPL if applicable. */
5229 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5230 {
5231 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5232 {
5233 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5234 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5235 }
5236 }
5237
5238 /* Is it there? */
5239 if (!Idte.Gate.u1Present)
5240 {
5241 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5242 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5243 }
5244
5245 /* A null CS is bad. */
5246 RTSEL NewCS = Idte.Gate.u16Sel;
5247 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5248 {
5249 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5250 return iemRaiseGeneralProtectionFault0(pVCpu);
5251 }
5252
5253 /* Fetch the descriptor for the new CS. */
5254 IEMSELDESC DescCS;
5255 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5256 if (rcStrict != VINF_SUCCESS)
5257 {
5258 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5259 return rcStrict;
5260 }
5261
5262 /* Must be a 64-bit code segment. */
5263 if (!DescCS.Long.Gen.u1DescType)
5264 {
5265 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5266 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5267 }
5268 if ( !DescCS.Long.Gen.u1Long
5269 || DescCS.Long.Gen.u1DefBig
5270 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5271 {
5272 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5273 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5274 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5275 }
5276
5277 /* Don't allow lowering the privilege level. For non-conforming CS
5278 selectors, the CS.DPL sets the privilege level the trap/interrupt
5279 handler runs at. For conforming CS selectors, the CPL remains
5280 unchanged, but the CS.DPL must be <= CPL. */
5281 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5282 * when CPU in Ring-0. Result \#GP? */
5283 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5284 {
5285 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5286 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5287 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5288 }
5289
5290
5291 /* Make sure the selector is present. */
5292 if (!DescCS.Legacy.Gen.u1Present)
5293 {
5294 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5295 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5296 }
5297
5298 /* Check that the new RIP is canonical. */
5299 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5300 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5301 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5302 if (!IEM_IS_CANONICAL(uNewRip))
5303 {
5304 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5305 return iemRaiseGeneralProtectionFault0(pVCpu);
5306 }
5307
5308 /*
5309 * If the privilege level changes or if the IST isn't zero, we need to get
5310 * a new stack from the TSS.
5311 */
5312 uint64_t uNewRsp;
5313 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5314 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5315 if ( uNewCpl != pVCpu->iem.s.uCpl
5316 || Idte.Gate.u3IST != 0)
5317 {
5318 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5319 if (rcStrict != VINF_SUCCESS)
5320 return rcStrict;
5321 }
5322 else
5323 uNewRsp = pCtx->rsp;
5324 uNewRsp &= ~(uint64_t)0xf;
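/* Note: the stack is aligned down to a 16-byte boundary before the long mode exception frame is pushed, hence the masking above. */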
5325
5326 /*
5327 * Calc the flag image to push.
5328 */
5329 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5330 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5331 fEfl &= ~X86_EFL_RF;
5332 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5333 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5334
5335 /*
5336 * Start making changes.
5337 */
5338 /* Set the new CPL so that stack accesses use it. */
5339 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5340 pVCpu->iem.s.uCpl = uNewCpl;
5341
5342 /* Create the stack frame. */
5343 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
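/* SS, RSP, RFLAGS, CS and RIP are always pushed in long mode (5 qwords), plus an optional error code qword. */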
5344 RTPTRUNION uStackFrame;
5345 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5346 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5347 if (rcStrict != VINF_SUCCESS)
5348 return rcStrict;
5349 void * const pvStackFrame = uStackFrame.pv;
5350
5351 if (fFlags & IEM_XCPT_FLAGS_ERR)
5352 *uStackFrame.pu64++ = uErr;
5353 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5354 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5355 uStackFrame.pu64[2] = fEfl;
5356 uStackFrame.pu64[3] = pCtx->rsp;
5357 uStackFrame.pu64[4] = pCtx->ss.Sel;
5358 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5359 if (rcStrict != VINF_SUCCESS)
5360 return rcStrict;
5361
5362 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5363 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5364 * after pushing the stack frame? (Write protect the gdt + stack to
5365 * find out.) */
5366 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5367 {
5368 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5369 if (rcStrict != VINF_SUCCESS)
5370 return rcStrict;
5371 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5372 }
5373
5374 /*
5375 * Start committing the register changes.
5376 */
5377 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5378 * hidden registers when interrupting 32-bit or 16-bit code! */
5379 if (uNewCpl != uOldCpl)
5380 {
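/* A privilege change in long mode loads SS with a NULL selector (RPL = new CPL); the hidden attributes are set to unusable with only the DPL filled in. */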
5381 pCtx->ss.Sel = 0 | uNewCpl;
5382 pCtx->ss.ValidSel = 0 | uNewCpl;
5383 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5384 pCtx->ss.u32Limit = UINT32_MAX;
5385 pCtx->ss.u64Base = 0;
5386 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5387 }
5388 pCtx->rsp = uNewRsp - cbStackFrame;
5389 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5390 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5391 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5392 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5393 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5394 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5395 pCtx->rip = uNewRip;
5396
5397 fEfl &= ~fEflToClear;
5398 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5399
5400 if (fFlags & IEM_XCPT_FLAGS_CR2)
5401 pCtx->cr2 = uCr2;
5402
5403 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5404 iemRaiseXcptAdjustState(pCtx, u8Vector);
5405
5406 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5407}
5408
5409
5410/**
5411 * Implements exceptions and interrupts.
5412 *
5413 * All exceptions and interrupts go through this function!
5414 *
5415 * @returns VBox strict status code.
5416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5417 * @param cbInstr The number of bytes to offset rIP by in the return
5418 * address.
5419 * @param u8Vector The interrupt / exception vector number.
5420 * @param fFlags The flags.
5421 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5422 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5423 */
5424DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5425iemRaiseXcptOrInt(PVMCPU pVCpu,
5426 uint8_t cbInstr,
5427 uint8_t u8Vector,
5428 uint32_t fFlags,
5429 uint16_t uErr,
5430 uint64_t uCr2)
5431{
5432 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5433#ifdef IN_RING0
5434 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5435 AssertRCReturn(rc, rc);
5436#endif
5437
5438#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5439 /*
5440 * Flush prefetch buffer
5441 */
5442 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5443#endif
5444
5445 /*
5446 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5447 */
5448 if ( pCtx->eflags.Bits.u1VM
5449 && pCtx->eflags.Bits.u2IOPL != 3
5450 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5451 && (pCtx->cr0 & X86_CR0_PE) )
5452 {
5453 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5454 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5455 u8Vector = X86_XCPT_GP;
5456 uErr = 0;
5457 }
5458#ifdef DBGFTRACE_ENABLED
5459 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5460 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5461 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5462#endif
5463
5464#ifdef VBOX_WITH_NESTED_HWVIRT
5465 if (IEM_IS_SVM_ENABLED(pVCpu))
5466 {
5467 /*
5468 * If the event is being injected as part of VMRUN, it isn't subject to event
5469 * intercepts in the nested-guest. However, secondary exceptions that occur
5470 * during injection of any event -are- subject to exception intercepts.
5471 * See AMD spec. 15.20 "Event Injection".
5472 */
5473 if (!pCtx->hwvirt.svm.fInterceptEvents)
5474 pCtx->hwvirt.svm.fInterceptEvents = 1;
5475 else
5476 {
5477 /*
5478 * Check and handle if the event being raised is intercepted.
5479 */
5480 VBOXSTRICTRC rcStrict0 = iemHandleSvmNstGstEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5481 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5482 return rcStrict0;
5483 }
5484 }
5485#endif /* VBOX_WITH_NESTED_HWVIRT */
5486
5487 /*
5488 * Do recursion accounting.
5489 */
5490 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5491 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5492 if (pVCpu->iem.s.cXcptRecursions == 0)
5493 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5494 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5495 else
5496 {
5497 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5498 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5499 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5500
5501 if (pVCpu->iem.s.cXcptRecursions >= 3)
5502 {
5503#ifdef DEBUG_bird
5504 AssertFailed();
5505#endif
5506 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5507 }
5508
5509 /*
5510 * Evaluate the sequence of recurring events.
5511 */
5512 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5513 NULL /* pXcptRaiseInfo */);
5514 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5515 { /* likely */ }
5516 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5517 {
5518 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5519 u8Vector = X86_XCPT_DF;
5520 uErr = 0;
5521 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5522 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5523 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + X86_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5524 }
5525 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5526 {
5527 Log2(("iemRaiseXcptOrInt: raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5528 return iemInitiateCpuShutdown(pVCpu);
5529 }
5530 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5531 {
5532 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5533 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5534 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5535 return VERR_EM_GUEST_CPU_HANG;
5536 }
5537 else
5538 {
5539 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5540 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5541 return VERR_IEM_IPE_9;
5542 }
5543
5544 /*
5545 * The 'EXT' bit is set when an exception occurs during delivery of an external
5546 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5547 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5548 * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
5549 *
5550 * [1] - Intel spec. 6.13 "Error Code"
5551 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5552 * [3] - Intel Instruction reference for INT n.
5553 */
5554 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5555 && (fFlags & IEM_XCPT_FLAGS_ERR)
5556 && u8Vector != X86_XCPT_PF
5557 && u8Vector != X86_XCPT_DF)
5558 {
5559 uErr |= X86_TRAP_ERR_EXTERNAL;
5560 }
5561 }
5562
5563 pVCpu->iem.s.cXcptRecursions++;
5564 pVCpu->iem.s.uCurXcpt = u8Vector;
5565 pVCpu->iem.s.fCurXcpt = fFlags;
5566 pVCpu->iem.s.uCurXcptErr = uErr;
5567 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5568
5569 /*
5570 * Extensive logging.
5571 */
5572#if defined(LOG_ENABLED) && defined(IN_RING3)
5573 if (LogIs3Enabled())
5574 {
5575 PVM pVM = pVCpu->CTX_SUFF(pVM);
5576 char szRegs[4096];
5577 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5578 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5579 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5580 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5581 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5582 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5583 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5584 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5585 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5586 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5587 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5588 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5589 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5590 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5591 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5592 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5593 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5594 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5595 " efer=%016VR{efer}\n"
5596 " pat=%016VR{pat}\n"
5597 " sf_mask=%016VR{sf_mask}\n"
5598 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5599 " lstar=%016VR{lstar}\n"
5600 " star=%016VR{star} cstar=%016VR{cstar}\n"
5601 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5602 );
5603
5604 char szInstr[256];
5605 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5606 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5607 szInstr, sizeof(szInstr), NULL);
5608 Log3(("%s%s\n", szRegs, szInstr));
5609 }
5610#endif /* LOG_ENABLED */
5611
5612 /*
5613 * Call the mode specific worker function.
5614 */
5615 VBOXSTRICTRC rcStrict;
5616 if (!(pCtx->cr0 & X86_CR0_PE))
5617 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5618 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5619 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5620 else
5621 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5622
5623 /* Flush the prefetch buffer. */
5624#ifdef IEM_WITH_CODE_TLB
5625 pVCpu->iem.s.pbInstrBuf = NULL;
5626#else
5627 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5628#endif
5629
5630 /*
5631 * Unwind.
5632 */
5633 pVCpu->iem.s.cXcptRecursions--;
5634 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5635 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5636 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5637 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pVCpu->iem.s.uCpl));
5638 return rcStrict;
5639}
5640
5641#ifdef IEM_WITH_SETJMP
5642/**
5643 * See iemRaiseXcptOrInt. Will not return.
5644 */
5645IEM_STATIC DECL_NO_RETURN(void)
5646iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5647 uint8_t cbInstr,
5648 uint8_t u8Vector,
5649 uint32_t fFlags,
5650 uint16_t uErr,
5651 uint64_t uCr2)
5652{
5653 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5654 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5655}
5656#endif
5657
5658
5659/** \#DE - 00. */
5660DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5661{
5662 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5663}
5664
5665
5666/** \#DB - 01.
5667 * @note This automatically clears DR7.GD. */
5668DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5669{
5670 /** @todo set/clear RF. */
5671 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5672 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5673}
5674
5675
5676/** \#BR - 05. */
5677DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5678{
5679 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5680}
5681
5682
5683/** \#UD - 06. */
5684DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5685{
5686 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5687}
5688
5689
5690/** \#NM - 07. */
5691DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5692{
5693 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5694}
5695
5696
5697/** \#TS(err) - 0a. */
5698DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5699{
5700 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5701}
5702
5703
5704/** \#TS(tr) - 0a. */
5705DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5706{
5707 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5708 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5709}
5710
5711
5712/** \#TS(0) - 0a. */
5713DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5714{
5715 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5716 0, 0);
5717}
5718
5719
5720 /** \#TS(sel) - 0a. */
5721DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5722{
5723 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5724 uSel & X86_SEL_MASK_OFF_RPL, 0);
5725}
5726
5727
5728/** \#NP(err) - 0b. */
5729DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5730{
5731 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5732}
5733
5734
5735/** \#NP(sel) - 0b. */
5736DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5737{
5738 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5739 uSel & ~X86_SEL_RPL, 0);
5740}
5741
5742
5743/** \#SS(seg) - 0c. */
5744DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5745{
5746 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5747 uSel & ~X86_SEL_RPL, 0);
5748}
5749
5750
5751/** \#SS(err) - 0c. */
5752DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5753{
5754 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5755}
5756
5757
5758/** \#GP(n) - 0d. */
5759DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5760{
5761 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5762}
5763
5764
5765/** \#GP(0) - 0d. */
5766DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5767{
5768 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5769}
5770
5771#ifdef IEM_WITH_SETJMP
5772/** \#GP(0) - 0d. */
5773DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5774{
5775 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5776}
5777#endif
5778
5779
5780/** \#GP(sel) - 0d. */
5781DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5782{
5783 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5784 Sel & ~X86_SEL_RPL, 0);
5785}
5786
5787
5788/** \#GP(0) - 0d. */
5789DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5790{
5791 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5792}
5793
5794
5795/** \#GP(sel) - 0d. */
5796DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5797{
5798 NOREF(iSegReg); NOREF(fAccess);
5799 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5800 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5801}
5802
5803#ifdef IEM_WITH_SETJMP
5804/** \#GP(sel) - 0d, longjmp. */
5805DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5806{
5807 NOREF(iSegReg); NOREF(fAccess);
5808 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5809 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5810}
5811#endif
5812
5813/** \#GP(sel) - 0d. */
5814DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5815{
5816 NOREF(Sel);
5817 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5818}
5819
5820#ifdef IEM_WITH_SETJMP
5821/** \#GP(sel) - 0d, longjmp. */
5822DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5823{
5824 NOREF(Sel);
5825 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5826}
5827#endif
5828
5829
5830/** \#GP(sel) - 0d. */
5831DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5832{
5833 NOREF(iSegReg); NOREF(fAccess);
5834 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5835}
5836
5837#ifdef IEM_WITH_SETJMP
5838/** \#GP(sel) - 0d, longjmp. */
5839DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5840 uint32_t fAccess)
5841{
5842 NOREF(iSegReg); NOREF(fAccess);
5843 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5844}
5845#endif
5846
5847
5848/** \#PF(n) - 0e. */
5849DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5850{
5851 uint16_t uErr;
5852 switch (rc)
5853 {
5854 case VERR_PAGE_NOT_PRESENT:
5855 case VERR_PAGE_TABLE_NOT_PRESENT:
5856 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5857 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5858 uErr = 0;
5859 break;
5860
5861 default:
5862 AssertMsgFailed(("%Rrc\n", rc));
5863 /* fall thru */
5864 case VERR_ACCESS_DENIED:
5865 uErr = X86_TRAP_PF_P;
5866 break;
5867
5868 /** @todo reserved */
5869 }
5870
5871 if (pVCpu->iem.s.uCpl == 3)
5872 uErr |= X86_TRAP_PF_US;
5873
5874 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5875 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5876 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5877 uErr |= X86_TRAP_PF_ID;
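/* So far: P for protection violations, US for CPL 3 accesses, ID for NX/PAE instruction fetches; RW is added below for write accesses. */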
5878
5879#if 0 /* This is so much non-sense, really. Why was it done like that? */
5880 /* Note! RW access callers reporting a WRITE protection fault, will clear
5881 the READ flag before calling. So, read-modify-write accesses (RW)
5882 can safely be reported as READ faults. */
5883 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5884 uErr |= X86_TRAP_PF_RW;
5885#else
5886 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5887 {
5888 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5889 uErr |= X86_TRAP_PF_RW;
5890 }
5891#endif
5892
5893 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5894 uErr, GCPtrWhere);
5895}
5896
5897#ifdef IEM_WITH_SETJMP
5898/** \#PF(n) - 0e, longjmp. */
5899IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5900{
5901 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5902}
5903#endif
5904
5905
5906/** \#MF(0) - 10. */
5907DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5908{
5909 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5910}
5911
5912
5913/** \#AC(0) - 11. */
5914DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5915{
5916 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5917}
5918
5919
5920/**
5921 * Macro for calling iemCImplRaiseDivideError().
5922 *
5923 * This enables us to add/remove arguments and force different levels of
5924 * inlining as we wish.
5925 *
5926 * @return Strict VBox status code.
5927 */
5928#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5929IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5930{
5931 NOREF(cbInstr);
5932 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5933}
5934
5935
5936/**
5937 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5938 *
5939 * This enables us to add/remove arguments and force different levels of
5940 * inlining as we wish.
5941 *
5942 * @return Strict VBox status code.
5943 */
5944#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5945IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5946{
5947 NOREF(cbInstr);
5948 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5949}
5950
5951
5952/**
5953 * Macro for calling iemCImplRaiseInvalidOpcode().
5954 *
5955 * This enables us to add/remove arguments and force different levels of
5956 * inlining as we wish.
5957 *
5958 * @return Strict VBox status code.
5959 */
5960#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
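/* Opcode decoders typically just do 'return IEMOP_RAISE_INVALID_OPCODE();' - see the FNIEMOP_UD_STUB macros further down for an example. */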
5961IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5962{
5963 NOREF(cbInstr);
5964 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5965}
5966
5967
5968/** @} */
5969
5970
5971/*
5972 *
5973 * Helper routines.
5974 * Helper routines.
5975 * Helper routines.
5976 *
5977 */
5978
5979/**
5980 * Recalculates the effective operand size.
5981 *
5982 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5983 */
5984IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5985{
5986 switch (pVCpu->iem.s.enmCpuMode)
5987 {
5988 case IEMMODE_16BIT:
5989 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5990 break;
5991 case IEMMODE_32BIT:
5992 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5993 break;
5994 case IEMMODE_64BIT:
5995 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5996 {
5997 case 0:
5998 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5999 break;
6000 case IEM_OP_PRF_SIZE_OP:
6001 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6002 break;
6003 case IEM_OP_PRF_SIZE_REX_W:
6004 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
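/* REX.W takes precedence over the operand-size (0x66) prefix. */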
6005 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6006 break;
6007 }
6008 break;
6009 default:
6010 AssertFailed();
6011 }
6012}
6013
6014
6015/**
6016 * Sets the default operand size to 64-bit and recalculates the effective
6017 * operand size.
6018 *
6019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6020 */
6021IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6022{
6023 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6024 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6025 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6026 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6027 else
6028 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6029}
6030
6031
6032/*
6033 *
6034 * Common opcode decoders.
6035 * Common opcode decoders.
6036 * Common opcode decoders.
6037 *
6038 */
6039//#include <iprt/mem.h>
6040
6041/**
6042 * Used to add extra details about a stub case.
6043 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6044 */
6045IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6046{
6047#if defined(LOG_ENABLED) && defined(IN_RING3)
6048 PVM pVM = pVCpu->CTX_SUFF(pVM);
6049 char szRegs[4096];
6050 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6051 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6052 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6053 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6054 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6055 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6056 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6057 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6058 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6059 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6060 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6061 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6062 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6063 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6064 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6065 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6066 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6067 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6068 " efer=%016VR{efer}\n"
6069 " pat=%016VR{pat}\n"
6070 " sf_mask=%016VR{sf_mask}\n"
6071 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6072 " lstar=%016VR{lstar}\n"
6073 " star=%016VR{star} cstar=%016VR{cstar}\n"
6074 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6075 );
6076
6077 char szInstr[256];
6078 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6079 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6080 szInstr, sizeof(szInstr), NULL);
6081
6082 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6083#else
6084 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
6085#endif
6086}
6087
6088/**
6089 * Complains about a stub.
6090 *
6091 * Providing two versions of this macro, one for daily use and one for use when
6092 * working on IEM.
6093 */
6094#if 0
6095# define IEMOP_BITCH_ABOUT_STUB() \
6096 do { \
6097 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6098 iemOpStubMsg2(pVCpu); \
6099 RTAssertPanic(); \
6100 } while (0)
6101#else
6102# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6103#endif
6104
6105/** Stubs an opcode. */
6106#define FNIEMOP_STUB(a_Name) \
6107 FNIEMOP_DEF(a_Name) \
6108 { \
6109 RT_NOREF_PV(pVCpu); \
6110 IEMOP_BITCH_ABOUT_STUB(); \
6111 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6112 } \
6113 typedef int ignore_semicolon
6114
6115/** Stubs an opcode. */
6116#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6117 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6118 { \
6119 RT_NOREF_PV(pVCpu); \
6120 RT_NOREF_PV(a_Name0); \
6121 IEMOP_BITCH_ABOUT_STUB(); \
6122 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6123 } \
6124 typedef int ignore_semicolon
6125
6126/** Stubs an opcode which currently should raise \#UD. */
6127#define FNIEMOP_UD_STUB(a_Name) \
6128 FNIEMOP_DEF(a_Name) \
6129 { \
6130 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6131 return IEMOP_RAISE_INVALID_OPCODE(); \
6132 } \
6133 typedef int ignore_semicolon
6134
6135/** Stubs an opcode which currently should raise \#UD. */
6136#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6137 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6138 { \
6139 RT_NOREF_PV(pVCpu); \
6140 RT_NOREF_PV(a_Name0); \
6141 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6142 return IEMOP_RAISE_INVALID_OPCODE(); \
6143 } \
6144 typedef int ignore_semicolon
6145
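/*
 * Illustration only (not compiled, and the opcode name below is made up rather
 * than an actual decoder table entry): stubbing a not-yet-implemented opcode is
 * a one-liner thanks to the macros above, with the trailing
 * 'typedef int ignore_semicolon' merely swallowing the semicolon at the
 * invocation site.
 */
#if 0
FNIEMOP_STUB(iemOp_ExampleNotYetImplemented);
#endif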
6146
6147
6148/** @name Register Access.
6149 * @{
6150 */
6151
6152/**
6153 * Gets a reference (pointer) to the specified hidden segment register.
6154 *
6155 * @returns Hidden register reference.
6156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6157 * @param iSegReg The segment register.
6158 */
6159IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6160{
6161 Assert(iSegReg < X86_SREG_COUNT);
6162 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6163 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6164
6165#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6166 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6167 { /* likely */ }
6168 else
6169 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6170#else
6171 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6172#endif
6173 return pSReg;
6174}
6175
6176
6177/**
6178 * Ensures that the given hidden segment register is up to date.
6179 *
6180 * @returns Hidden register reference.
6181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6182 * @param pSReg The segment register.
6183 */
6184IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6185{
6186#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6187 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6188 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6189#else
6190 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6191 NOREF(pVCpu);
6192#endif
6193 return pSReg;
6194}
6195
6196
6197/**
6198 * Gets a reference (pointer) to the specified segment register (the selector
6199 * value).
6200 *
6201 * @returns Pointer to the selector variable.
6202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6203 * @param iSegReg The segment register.
6204 */
6205DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6206{
6207 Assert(iSegReg < X86_SREG_COUNT);
6208 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6209 return &pCtx->aSRegs[iSegReg].Sel;
6210}
6211
6212
6213/**
6214 * Fetches the selector value of a segment register.
6215 *
6216 * @returns The selector value.
6217 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6218 * @param iSegReg The segment register.
6219 */
6220DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6221{
6222 Assert(iSegReg < X86_SREG_COUNT);
6223 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6224}
6225
6226
6227/**
6228 * Gets a reference (pointer) to the specified general purpose register.
6229 *
6230 * @returns Register reference.
6231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6232 * @param iReg The general purpose register.
6233 */
6234DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6235{
6236 Assert(iReg < 16);
6237 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6238 return &pCtx->aGRegs[iReg];
6239}
6240
6241
6242/**
6243 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6244 *
6245 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6246 *
6247 * @returns Register reference.
6248 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6249 * @param iReg The register.
6250 */
6251DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6252{
6253 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6254 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6255 {
6256 Assert(iReg < 16);
6257 return &pCtx->aGRegs[iReg].u8;
6258 }
6259 /* high 8-bit register. */
6260 Assert(iReg < 8);
6261 return &pCtx->aGRegs[iReg & 3].bHi;
6262}
6263
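/*
 * Example of the high byte mapping above (no REX prefix): encodings 4..7
 * resolve to AH/CH/DH/BH, i.e. &aGRegs[0..3].bHi, whereas with any REX prefix
 * present the very same encodings address SPL/BPL/SIL/DIL (aGRegs[4..7].u8),
 * which is why the prefix check is done first.
 */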
6264
6265/**
6266 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6267 *
6268 * @returns Register reference.
6269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6270 * @param iReg The register.
6271 */
6272DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6273{
6274 Assert(iReg < 16);
6275 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6276 return &pCtx->aGRegs[iReg].u16;
6277}
6278
6279
6280/**
6281 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6282 *
6283 * @returns Register reference.
6284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6285 * @param iReg The register.
6286 */
6287DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6288{
6289 Assert(iReg < 16);
6290 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6291 return &pCtx->aGRegs[iReg].u32;
6292}
6293
6294
6295/**
6296 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6297 *
6298 * @returns Register reference.
6299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6300 * @param iReg The register.
6301 */
6302DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6303{
6304 Assert(iReg < 16);
6305 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6306 return &pCtx->aGRegs[iReg].u64;
6307}
6308
6309
6310/**
6311 * Fetches the value of an 8-bit general purpose register.
6312 *
6313 * @returns The register value.
6314 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6315 * @param iReg The register.
6316 */
6317DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6318{
6319 return *iemGRegRefU8(pVCpu, iReg);
6320}
6321
6322
6323/**
6324 * Fetches the value of a 16-bit general purpose register.
6325 *
6326 * @returns The register value.
6327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6328 * @param iReg The register.
6329 */
6330DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6331{
6332 Assert(iReg < 16);
6333 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6334}
6335
6336
6337/**
6338 * Fetches the value of a 32-bit general purpose register.
6339 *
6340 * @returns The register value.
6341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6342 * @param iReg The register.
6343 */
6344DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6345{
6346 Assert(iReg < 16);
6347 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6348}
6349
6350
6351/**
6352 * Fetches the value of a 64-bit general purpose register.
6353 *
6354 * @returns The register value.
6355 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6356 * @param iReg The register.
6357 */
6358DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6359{
6360 Assert(iReg < 16);
6361 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6362}
6363
6364
6365/**
6366 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6367 *
6368 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6369 * segment limit.
6370 *
6371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6372 * @param offNextInstr The offset of the next instruction.
6373 */
6374IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6375{
6376 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6377 switch (pVCpu->iem.s.enmEffOpSize)
6378 {
6379 case IEMMODE_16BIT:
6380 {
6381 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6382 if ( uNewIp > pCtx->cs.u32Limit
6383 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6384 return iemRaiseGeneralProtectionFault0(pVCpu);
6385 pCtx->rip = uNewIp;
6386 break;
6387 }
6388
6389 case IEMMODE_32BIT:
6390 {
6391 Assert(pCtx->rip <= UINT32_MAX);
6392 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6393
6394 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6395 if (uNewEip > pCtx->cs.u32Limit)
6396 return iemRaiseGeneralProtectionFault0(pVCpu);
6397 pCtx->rip = uNewEip;
6398 break;
6399 }
6400
6401 case IEMMODE_64BIT:
6402 {
6403 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6404
6405 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6406 if (!IEM_IS_CANONICAL(uNewRip))
6407 return iemRaiseGeneralProtectionFault0(pVCpu);
6408 pCtx->rip = uNewRip;
6409 break;
6410 }
6411
6412 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6413 }
6414
6415 pCtx->eflags.Bits.u1RF = 0;
6416
6417#ifndef IEM_WITH_CODE_TLB
6418 /* Flush the prefetch buffer. */
6419 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6420#endif
6421
6422 return VINF_SUCCESS;
6423}
6424
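/*
 * Worked example for the 16-bit case above: a 2 byte JMP rel8 at ip=0x1234
 * with offNextInstr=-6 yields uNewIp = 0x1234 + (-6) + 2 = 0x1230. The sum is
 * computed in 16-bit arithmetic, so wrap-around is implicit and only the CS
 * limit check can reject the result.
 */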
6425
6426/**
6427 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6428 *
6429 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6430 * segment limit.
6431 *
6432 * @returns Strict VBox status code.
6433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6434 * @param offNextInstr The offset of the next instruction.
6435 */
6436IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6437{
6438 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6439 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6440
6441 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6442 if ( uNewIp > pCtx->cs.u32Limit
6443 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6444 return iemRaiseGeneralProtectionFault0(pVCpu);
6445 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6446 pCtx->rip = uNewIp;
6447 pCtx->eflags.Bits.u1RF = 0;
6448
6449#ifndef IEM_WITH_CODE_TLB
6450 /* Flush the prefetch buffer. */
6451 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6452#endif
6453
6454 return VINF_SUCCESS;
6455}
6456
6457
6458/**
6459 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6460 *
6461 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6462 * segment limit.
6463 *
6464 * @returns Strict VBox status code.
6465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6466 * @param offNextInstr The offset of the next instruction.
6467 */
6468IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6469{
6470 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6471 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6472
6473 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6474 {
6475 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6476
6477 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6478 if (uNewEip > pCtx->cs.u32Limit)
6479 return iemRaiseGeneralProtectionFault0(pVCpu);
6480 pCtx->rip = uNewEip;
6481 }
6482 else
6483 {
6484 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6485
6486 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6487 if (!IEM_IS_CANONICAL(uNewRip))
6488 return iemRaiseGeneralProtectionFault0(pVCpu);
6489 pCtx->rip = uNewRip;
6490 }
6491 pCtx->eflags.Bits.u1RF = 0;
6492
6493#ifndef IEM_WITH_CODE_TLB
6494 /* Flush the prefetch buffer. */
6495 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6496#endif
6497
6498 return VINF_SUCCESS;
6499}
6500
6501
6502/**
6503 * Performs a near jump to the specified address.
6504 *
6505 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6506 * segment limit.
6507 *
6508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6509 * @param uNewRip The new RIP value.
6510 */
6511IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6512{
6513 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6514 switch (pVCpu->iem.s.enmEffOpSize)
6515 {
6516 case IEMMODE_16BIT:
6517 {
6518 Assert(uNewRip <= UINT16_MAX);
6519 if ( uNewRip > pCtx->cs.u32Limit
6520 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6521 return iemRaiseGeneralProtectionFault0(pVCpu);
6522 /** @todo Test 16-bit jump in 64-bit mode. */
6523 pCtx->rip = uNewRip;
6524 break;
6525 }
6526
6527 case IEMMODE_32BIT:
6528 {
6529 Assert(uNewRip <= UINT32_MAX);
6530 Assert(pCtx->rip <= UINT32_MAX);
6531 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6532
6533 if (uNewRip > pCtx->cs.u32Limit)
6534 return iemRaiseGeneralProtectionFault0(pVCpu);
6535 pCtx->rip = uNewRip;
6536 break;
6537 }
6538
6539 case IEMMODE_64BIT:
6540 {
6541 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6542
6543 if (!IEM_IS_CANONICAL(uNewRip))
6544 return iemRaiseGeneralProtectionFault0(pVCpu);
6545 pCtx->rip = uNewRip;
6546 break;
6547 }
6548
6549 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6550 }
6551
6552 pCtx->eflags.Bits.u1RF = 0;
6553
6554#ifndef IEM_WITH_CODE_TLB
6555 /* Flush the prefetch buffer. */
6556 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6557#endif
6558
6559 return VINF_SUCCESS;
6560}
6561
6562
6563/**
6564 * Get the address of the top of the stack.
6565 *
6566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6567 * @param pCtx The CPU context which SP/ESP/RSP should be
6568 * @param pCtx The CPU context from which SP/ESP/RSP should be
6569 * read.
6570DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6571{
6572 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6573 return pCtx->rsp;
6574 if (pCtx->ss.Attr.n.u1DefBig)
6575 return pCtx->esp;
6576 return pCtx->sp;
6577}
6578
6579
6580/**
6581 * Updates the RIP/EIP/IP to point to the next instruction.
6582 *
6583 * This function leaves the EFLAGS.RF flag alone.
6584 *
6585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6586 * @param cbInstr The number of bytes to add.
6587 */
6588IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6589{
6590 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6591 switch (pVCpu->iem.s.enmCpuMode)
6592 {
6593 case IEMMODE_16BIT:
6594 Assert(pCtx->rip <= UINT16_MAX);
6595 pCtx->eip += cbInstr;
6596 pCtx->eip &= UINT32_C(0xffff);
6597 break;
6598
6599 case IEMMODE_32BIT:
6600 pCtx->eip += cbInstr;
6601 Assert(pCtx->rip <= UINT32_MAX);
6602 break;
6603
6604 case IEMMODE_64BIT:
6605 pCtx->rip += cbInstr;
6606 break;
6607 default: AssertFailed();
6608 }
6609}
6610
6611
6612#if 0
6613/**
6614 * Updates the RIP/EIP/IP to point to the next instruction.
6615 *
6616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6617 */
6618IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6619{
6620 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6621}
6622#endif
6623
6624
6625
6626/**
6627 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6628 *
6629 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6630 * @param cbInstr The number of bytes to add.
6631 */
6632IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6633{
6634 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6635
6636 pCtx->eflags.Bits.u1RF = 0;
6637
6638 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6639#if ARCH_BITS >= 64
6640 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6641 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6642 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6643#else
6644 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6645 pCtx->rip += cbInstr;
6646 else
6647 {
6648 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6649 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6650 }
6651#endif
6652}
6653
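/*
 * Both branches above advance RIP at the natural width of the current mode:
 * in 16-bit mode the result is masked with 0xffff, so rip=0xffff plus a one
 * byte instruction wraps to 0x0000, while in 64-bit mode the full 64-bit
 * value is kept.
 */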
6654
6655/**
6656 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6657 *
6658 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6659 */
6660IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6661{
6662 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6663}
6664
6665
6666/**
6667 * Adds to the stack pointer.
6668 *
6669 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6670 * @param pCtx The CPU context in which SP/ESP/RSP should be
6671 * updated.
6672 * @param cbToAdd The number of bytes to add (8-bit!).
6673 */
6674DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6675{
6676 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6677 pCtx->rsp += cbToAdd;
6678 else if (pCtx->ss.Attr.n.u1DefBig)
6679 pCtx->esp += cbToAdd;
6680 else
6681 pCtx->sp += cbToAdd;
6682}
6683
6684
6685/**
6686 * Subtracts from the stack pointer.
6687 *
6688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6689 * @param pCtx The CPU context in which SP/ESP/RSP should be
6690 * updated.
6691 * @param cbToSub The number of bytes to subtract (8-bit!).
6692 */
6693DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6694{
6695 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6696 pCtx->rsp -= cbToSub;
6697 else if (pCtx->ss.Attr.n.u1DefBig)
6698 pCtx->esp -= cbToSub;
6699 else
6700 pCtx->sp -= cbToSub;
6701}
6702
6703
6704/**
6705 * Adds to the temporary stack pointer.
6706 *
6707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6708 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6709 * @param cbToAdd The number of bytes to add (16-bit).
6710 * @param pCtx Where to get the current stack mode.
6711 */
6712DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6713{
6714 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6715 pTmpRsp->u += cbToAdd;
6716 else if (pCtx->ss.Attr.n.u1DefBig)
6717 pTmpRsp->DWords.dw0 += cbToAdd;
6718 else
6719 pTmpRsp->Words.w0 += cbToAdd;
6720}
6721
6722
6723/**
6724 * Subtracts from the temporary stack pointer.
6725 *
6726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6727 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6728 * @param cbToSub The number of bytes to subtract.
6729 * @param pCtx Where to get the current stack mode.
6730 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6731 * expecting that.
6732 */
6733DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6734{
6735 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6736 pTmpRsp->u -= cbToSub;
6737 else if (pCtx->ss.Attr.n.u1DefBig)
6738 pTmpRsp->DWords.dw0 -= cbToSub;
6739 else
6740 pTmpRsp->Words.w0 -= cbToSub;
6741}
6742
6743
6744/**
6745 * Calculates the effective stack address for a push of the specified size as
6746 * well as the new RSP value (upper bits may be masked).
6747 *
6748 * @returns Effective stack address for the push.
6749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6750 * @param pCtx Where to get the current stack mode.
6751 * @param cbItem The size of the stack item to push.
6752 * @param puNewRsp Where to return the new RSP value.
6753 */
6754DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6755{
6756 RTUINT64U uTmpRsp;
6757 RTGCPTR GCPtrTop;
6758 uTmpRsp.u = pCtx->rsp;
6759
6760 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6761 GCPtrTop = uTmpRsp.u -= cbItem;
6762 else if (pCtx->ss.Attr.n.u1DefBig)
6763 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6764 else
6765 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6766 *puNewRsp = uTmpRsp.u;
6767 return GCPtrTop;
6768}
6769
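/*
 * Example: a 4 byte push with SS.D=1 (32-bit stack) and RSP=0x0000000000001000
 * returns GCPtrTop=0xffc and sets the low dword of *puNewRsp to 0xffc while
 * preserving the upper dword, since only DWords.dw0 is modified. The new RSP
 * is returned rather than written to the context so the caller can defer
 * committing it until the actual stack access has succeeded.
 */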
6770
6771/**
6772 * Gets the current stack pointer and calculates the value after a pop of the
6773 * specified size.
6774 *
6775 * @returns Current stack pointer.
6776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6777 * @param pCtx Where to get the current stack mode.
6778 * @param cbItem The size of the stack item to pop.
6779 * @param puNewRsp Where to return the new RSP value.
6780 */
6781DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6782{
6783 RTUINT64U uTmpRsp;
6784 RTGCPTR GCPtrTop;
6785 uTmpRsp.u = pCtx->rsp;
6786
6787 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6788 {
6789 GCPtrTop = uTmpRsp.u;
6790 uTmpRsp.u += cbItem;
6791 }
6792 else if (pCtx->ss.Attr.n.u1DefBig)
6793 {
6794 GCPtrTop = uTmpRsp.DWords.dw0;
6795 uTmpRsp.DWords.dw0 += cbItem;
6796 }
6797 else
6798 {
6799 GCPtrTop = uTmpRsp.Words.w0;
6800 uTmpRsp.Words.w0 += cbItem;
6801 }
6802 *puNewRsp = uTmpRsp.u;
6803 return GCPtrTop;
6804}
6805
6806
6807/**
6808 * Calculates the effective stack address for a push of the specified size as
6809 * well as the new temporary RSP value (upper bits may be masked).
6810 *
6811 * @returns Effective stack address for the push.
6812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6813 * @param pCtx Where to get the current stack mode.
6814 * @param pTmpRsp The temporary stack pointer. This is updated.
6815 * @param cbItem The size of the stack item to push.
6816 */
6817DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6818{
6819 RTGCPTR GCPtrTop;
6820
6821 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6822 GCPtrTop = pTmpRsp->u -= cbItem;
6823 else if (pCtx->ss.Attr.n.u1DefBig)
6824 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6825 else
6826 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6827 return GCPtrTop;
6828}
6829
6830
6831/**
6832 * Gets the effective stack address for a pop of the specified size and
6833 * calculates and updates the temporary RSP.
6834 *
6835 * @returns Current stack pointer.
6836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6837 * @param pCtx Where to get the current stack mode.
6838 * @param pTmpRsp The temporary stack pointer. This is updated.
6839 * @param cbItem The size of the stack item to pop.
6840 */
6841DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6842{
6843 RTGCPTR GCPtrTop;
6844 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6845 {
6846 GCPtrTop = pTmpRsp->u;
6847 pTmpRsp->u += cbItem;
6848 }
6849 else if (pCtx->ss.Attr.n.u1DefBig)
6850 {
6851 GCPtrTop = pTmpRsp->DWords.dw0;
6852 pTmpRsp->DWords.dw0 += cbItem;
6853 }
6854 else
6855 {
6856 GCPtrTop = pTmpRsp->Words.w0;
6857 pTmpRsp->Words.w0 += cbItem;
6858 }
6859 return GCPtrTop;
6860}
6861
6862/** @} */
6863
6864
6865/** @name FPU access and helpers.
6866 *
6867 * @{
6868 */
6869
6870
6871/**
6872 * Hook for preparing to use the host FPU.
6873 *
6874 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6875 *
6876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6877 */
6878DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6879{
6880#ifdef IN_RING3
6881 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6882#else
6883 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6884#endif
6885}
6886
6887
6888/**
6889 * Hook for preparing to use the host FPU for SSE.
6890 *
6891 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6892 *
6893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6894 */
6895DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6896{
6897 iemFpuPrepareUsage(pVCpu);
6898}
6899
6900
6901/**
6902 * Hook for preparing to use the host FPU for AVX.
6903 *
6904 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6905 *
6906 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6907 */
6908DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6909{
6910 iemFpuPrepareUsage(pVCpu);
6911}
6912
6913
6914/**
6915 * Hook for actualizing the guest FPU state before the interpreter reads it.
6916 *
6917 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6918 *
6919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6920 */
6921DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6922{
6923#ifdef IN_RING3
6924 NOREF(pVCpu);
6925#else
6926 CPUMRZFpuStateActualizeForRead(pVCpu);
6927#endif
6928}
6929
6930
6931/**
6932 * Hook for actualizing the guest FPU state before the interpreter changes it.
6933 *
6934 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6935 *
6936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6937 */
6938DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6939{
6940#ifdef IN_RING3
6941 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6942#else
6943 CPUMRZFpuStateActualizeForChange(pVCpu);
6944#endif
6945}
6946
6947
6948/**
6949 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6950 * only.
6951 *
6952 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6953 *
6954 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6955 */
6956DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6957{
6958#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6959 NOREF(pVCpu);
6960#else
6961 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6962#endif
6963}
6964
6965
6966/**
6967 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6968 * read+write.
6969 *
6970 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6971 *
6972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6973 */
6974DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6975{
6976#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6977 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6978#else
6979 CPUMRZFpuStateActualizeForChange(pVCpu);
6980#endif
6981}
6982
6983
6984/**
6985 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6986 * only.
6987 *
6988 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6989 *
6990 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6991 */
6992DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6993{
6994#ifdef IN_RING3
6995 NOREF(pVCpu);
6996#else
6997 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6998#endif
6999}
7000
7001
7002/**
7003 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7004 * read+write.
7005 *
7006 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7007 *
7008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7009 */
7010DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7011{
7012#ifdef IN_RING3
7013 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7014#else
7015 CPUMRZFpuStateActualizeForChange(pVCpu);
7016#endif
7017}
7018
7019
7020/**
7021 * Stores a QNaN value into a FPU register.
7022 *
7023 * @param pReg Pointer to the register.
7024 */
7025DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7026{
7027 pReg->au32[0] = UINT32_C(0x00000000);
7028 pReg->au32[1] = UINT32_C(0xc0000000);
7029 pReg->au16[4] = UINT16_C(0xffff);
7030}
7031
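/*
 * The value stored above is the x87 "real indefinite" QNaN: sign=1 and
 * exponent=0x7fff (au16[4]=0xffff) with a mantissa of 0xc000000000000000,
 * i.e. the 80-bit pattern ffff:c000000000000000 that hardware produces as
 * the masked invalid-operation response.
 */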
7032
7033/**
7034 * Updates the FOP, FPU.CS and FPUIP registers.
7035 *
7036 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7037 * @param pCtx The CPU context.
7038 * @param pFpuCtx The FPU context.
7039 */
7040DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
7041{
7042 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7043 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7044 /** @todo x87.CS and FPUIP need to be kept separately. */
7045 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7046 {
7047 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7048 * happens in real mode here based on the fnsave and fnstenv images. */
7049 pFpuCtx->CS = 0;
7050 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
7051 }
7052 else
7053 {
7054 pFpuCtx->CS = pCtx->cs.Sel;
7055 pFpuCtx->FPUIP = pCtx->rip;
7056 }
7057}
7058
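/*
 * Real mode example for the worker above: with CS=0x1234 and EIP=0x0100 the
 * stored FPUIP becomes 0x0100 | (0x1234 << 4) = 0x12440, i.e. the linear
 * address of the instruction, while the x87 CS field is left at zero,
 * mirroring the assumption about the fnsave/fnstenv images noted in the todo.
 */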
7059
7060/**
7061 * Updates the x87.DS and FPUDP registers.
7062 *
7063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7064 * @param pCtx The CPU context.
7065 * @param pFpuCtx The FPU context.
7066 * @param iEffSeg The effective segment register.
7067 * @param GCPtrEff The effective address relative to @a iEffSeg.
7068 */
7069DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7070{
7071 RTSEL sel;
7072 switch (iEffSeg)
7073 {
7074 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
7075 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
7076 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
7077 case X86_SREG_ES: sel = pCtx->es.Sel; break;
7078 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
7079 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
7080 default:
7081 AssertMsgFailed(("%d\n", iEffSeg));
7082 sel = pCtx->ds.Sel;
7083 }
7084 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7085 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7086 {
7087 pFpuCtx->DS = 0;
7088 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7089 }
7090 else
7091 {
7092 pFpuCtx->DS = sel;
7093 pFpuCtx->FPUDP = GCPtrEff;
7094 }
7095}
7096
7097
7098/**
7099 * Rotates the stack registers in the push direction.
7100 *
7101 * @param pFpuCtx The FPU context.
7102 * @remarks This is a complete waste of time, but fxsave stores the registers in
7103 * stack order.
7104 */
7105DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7106{
7107 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7108 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7109 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7110 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7111 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7112 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7113 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7114 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7115 pFpuCtx->aRegs[0].r80 = r80Tmp;
7116}
7117
7118
7119/**
7120 * Rotates the stack registers in the pop direction.
7121 *
7122 * @param pFpuCtx The FPU context.
7123 * @remarks This is a complete waste of time, but fxsave stores the registers in
7124 * stack order.
7125 */
7126DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7127{
7128 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7129 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7130 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7131 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7132 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7133 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7134 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7135 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7136 pFpuCtx->aRegs[7].r80 = r80Tmp;
7137}
7138
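/*
 * Since aRegs[i] mirrors the fxsave layout and thus holds ST(i) rather than a
 * fixed physical register, changing TOP means physically rotating all eight
 * entries. Example: iemFpuMaybePushResult below writes the new value into
 * aRegs[7].r80 and then calls iemFpuRotateStackPush, which moves it into
 * aRegs[0] where ST(0) is expected after the push.
 */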
7139
7140/**
7141 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7142 * exception prevents it.
7143 *
7144 * @param pResult The FPU operation result to push.
7145 * @param pFpuCtx The FPU context.
7146 */
7147IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7148{
7149 /* Update FSW and bail if there are pending exceptions afterwards. */
7150 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7151 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7152 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7153 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7154 {
7155 pFpuCtx->FSW = fFsw;
7156 return;
7157 }
7158
7159 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7160 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7161 {
7162 /* All is fine, push the actual value. */
7163 pFpuCtx->FTW |= RT_BIT(iNewTop);
7164 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7165 }
7166 else if (pFpuCtx->FCW & X86_FCW_IM)
7167 {
7168 /* Masked stack overflow, push QNaN. */
7169 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7170 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7171 }
7172 else
7173 {
7174 /* Raise stack overflow, don't push anything. */
7175 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7176 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7177 return;
7178 }
7179
7180 fFsw &= ~X86_FSW_TOP_MASK;
7181 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7182 pFpuCtx->FSW = fFsw;
7183
7184 iemFpuRotateStackPush(pFpuCtx);
7185}
7186
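/*
 * Note on the TOP arithmetic above: adding 7 and masking with
 * X86_FSW_TOP_SMASK is a modulo-8 decrement, so iNewTop is the stack slot the
 * push will occupy; e.g. TOP=0 gives iNewTop=7 and TOP=3 gives iNewTop=2.
 */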
7187
7188/**
7189 * Stores a result in a FPU register and updates the FSW and FTW.
7190 *
7191 * @param pFpuCtx The FPU context.
7192 * @param pResult The result to store.
7193 * @param iStReg Which FPU register to store it in.
7194 */
7195IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7196{
7197 Assert(iStReg < 8);
7198 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7199 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7200 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7201 pFpuCtx->FTW |= RT_BIT(iReg);
7202 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7203}
7204
7205
7206/**
7207 * Only updates the FPU status word (FSW) with the result of the current
7208 * instruction.
7209 *
7210 * @param pFpuCtx The FPU context.
7211 * @param u16FSW The FSW output of the current instruction.
7212 */
7213IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7214{
7215 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7216 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7217}
7218
7219
7220/**
7221 * Pops one item off the FPU stack if no pending exception prevents it.
7222 *
7223 * @param pFpuCtx The FPU context.
7224 */
7225IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7226{
7227 /* Check pending exceptions. */
7228 uint16_t uFSW = pFpuCtx->FSW;
7229 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7230 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7231 return;
7232
7233 /* TOP--. */
7234 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7235 uFSW &= ~X86_FSW_TOP_MASK;
7236 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7237 pFpuCtx->FSW = uFSW;
7238
7239 /* Mark the previous ST0 as empty. */
7240 iOldTop >>= X86_FSW_TOP_SHIFT;
7241 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7242
7243 /* Rotate the registers. */
7244 iemFpuRotateStackPop(pFpuCtx);
7245}
7246
7247
7248/**
7249 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7250 *
7251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7252 * @param pResult The FPU operation result to push.
7253 */
7254IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7255{
7256 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7257 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7258 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7259 iemFpuMaybePushResult(pResult, pFpuCtx);
7260}
7261
7262
7263/**
7264 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7265 * and sets FPUDP and FPUDS.
7266 *
7267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7268 * @param pResult The FPU operation result to push.
7269 * @param iEffSeg The effective segment register.
7270 * @param GCPtrEff The effective address relative to @a iEffSeg.
7271 */
7272IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7273{
7274 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7275 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7276 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7277 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7278 iemFpuMaybePushResult(pResult, pFpuCtx);
7279}
7280
7281
7282/**
7283 * Replace ST0 with the first value and push the second onto the FPU stack,
7284 * unless a pending exception prevents it.
7285 *
7286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7287 * @param pResult The FPU operation result to store and push.
7288 */
7289IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7290{
7291 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7292 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7293 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7294
7295 /* Update FSW and bail if there are pending exceptions afterwards. */
7296 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7297 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7298 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7299 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7300 {
7301 pFpuCtx->FSW = fFsw;
7302 return;
7303 }
7304
7305 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7306 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7307 {
7308 /* All is fine, push the actual value. */
7309 pFpuCtx->FTW |= RT_BIT(iNewTop);
7310 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7311 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7312 }
7313 else if (pFpuCtx->FCW & X86_FCW_IM)
7314 {
7315 /* Masked stack overflow, push QNaN. */
7316 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7317 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7318 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7319 }
7320 else
7321 {
7322 /* Raise stack overflow, don't push anything. */
7323 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7324 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7325 return;
7326 }
7327
7328 fFsw &= ~X86_FSW_TOP_MASK;
7329 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7330 pFpuCtx->FSW = fFsw;
7331
7332 iemFpuRotateStackPush(pFpuCtx);
7333}
7334
7335
7336/**
7337 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7338 * FOP.
7339 *
7340 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7341 * @param pResult The result to store.
7342 * @param iStReg Which FPU register to store it in.
7343 */
7344IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7345{
7346 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7347 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7348 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7349 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7350}
7351
7352
7353/**
7354 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7355 * FOP, and then pops the stack.
7356 *
7357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7358 * @param pResult The result to store.
7359 * @param iStReg Which FPU register to store it in.
7360 */
7361IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7362{
7363 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7364 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7365 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7366 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7367 iemFpuMaybePopOne(pFpuCtx);
7368}
7369
7370
7371/**
7372 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7373 * FPUDP, and FPUDS.
7374 *
7375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7376 * @param pResult The result to store.
7377 * @param iStReg Which FPU register to store it in.
7378 * @param iEffSeg The effective memory operand selector register.
7379 * @param GCPtrEff The effective memory operand offset.
7380 */
7381IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7382 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7383{
7384 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7385 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7386 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7387 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7388 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7389}
7390
7391
7392/**
7393 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7394 * FPUDP, and FPUDS, and then pops the stack.
7395 *
7396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7397 * @param pResult The result to store.
7398 * @param iStReg Which FPU register to store it in.
7399 * @param iEffSeg The effective memory operand selector register.
7400 * @param GCPtrEff The effective memory operand offset.
7401 */
7402IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7403 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7404{
7405 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7406 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7407 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7408 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7409 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7410 iemFpuMaybePopOne(pFpuCtx);
7411}
7412
7413
7414/**
7415 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7416 *
7417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7418 */
7419IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7420{
7421 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7422 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7423 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7424}
7425
7426
7427/**
7428 * Marks the specified stack register as free (for FFREE).
7429 *
7430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7431 * @param iStReg The register to free.
7432 */
7433IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7434{
7435 Assert(iStReg < 8);
7436 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7437 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7438 pFpuCtx->FTW &= ~RT_BIT(iReg);
7439}
7440
7441
7442/**
7443 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7444 *
7445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7446 */
7447IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7448{
7449 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7450 uint16_t uFsw = pFpuCtx->FSW;
7451 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7452 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7453 uFsw &= ~X86_FSW_TOP_MASK;
7454 uFsw |= uTop;
7455 pFpuCtx->FSW = uFsw;
7456}
7457
7458
7459/**
7460 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7461 *
7462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7463 */
7464IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7465{
7466 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7467 uint16_t uFsw = pFpuCtx->FSW;
7468 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7469 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7470 uFsw &= ~X86_FSW_TOP_MASK;
7471 uFsw |= uTop;
7472 pFpuCtx->FSW = uFsw;
7473}
7474
7475
7476/**
7477 * Updates the FSW, FOP, FPUIP, and FPUCS.
7478 *
7479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7480 * @param u16FSW The FSW from the current instruction.
7481 */
7482IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7483{
7484 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7485 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7486 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7487 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7488}
7489
7490
7491/**
7492 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7493 *
7494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7495 * @param u16FSW The FSW from the current instruction.
7496 */
7497IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7498{
7499 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7500 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7501 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7502 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7503 iemFpuMaybePopOne(pFpuCtx);
7504}
7505
7506
7507/**
7508 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7509 *
7510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7511 * @param u16FSW The FSW from the current instruction.
7512 * @param iEffSeg The effective memory operand selector register.
7513 * @param GCPtrEff The effective memory operand offset.
7514 */
7515IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7516{
7517 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7518 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7519 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7520 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7521 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7522}
7523
7524
7525/**
7526 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7527 *
7528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7529 * @param u16FSW The FSW from the current instruction.
7530 */
7531IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7532{
7533 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7534 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7535 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7536 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7537 iemFpuMaybePopOne(pFpuCtx);
7538 iemFpuMaybePopOne(pFpuCtx);
7539}
7540
7541
7542/**
7543 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7544 *
7545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7546 * @param u16FSW The FSW from the current instruction.
7547 * @param iEffSeg The effective memory operand selector register.
7548 * @param GCPtrEff The effective memory operand offset.
7549 */
7550IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7551{
7552 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7553 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7554 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7555 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7556 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7557 iemFpuMaybePopOne(pFpuCtx);
7558}
7559
7560
7561/**
7562 * Worker routine for raising an FPU stack underflow exception.
7563 *
7564 * @param pFpuCtx The FPU context.
7565 * @param iStReg The stack register being accessed.
7566 */
7567IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7568{
7569 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7570 if (pFpuCtx->FCW & X86_FCW_IM)
7571 {
7572 /* Masked underflow. */
7573 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7574 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7575 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7576 if (iStReg != UINT8_MAX)
7577 {
7578 pFpuCtx->FTW |= RT_BIT(iReg);
7579 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7580 }
7581 }
7582 else
7583 {
7584 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7585 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7586 }
7587}
7588
7589
7590/**
7591 * Raises a FPU stack underflow exception.
7592 *
7593 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7594 * @param iStReg The destination register that should be loaded
7595 * with QNaN if \#IS is not masked. Specify
7596 * UINT8_MAX if none (like for fcom).
7597 */
7598DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7599{
7600 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7601 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7602 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7603 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7604}
7605
7606
7607DECL_NO_INLINE(IEM_STATIC, void)
7608iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7609{
7610 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7611 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7612 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7613 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7614 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7615}
7616
7617
7618DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7619{
7620 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7621 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7622 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7623 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7624 iemFpuMaybePopOne(pFpuCtx);
7625}
7626
7627
7628DECL_NO_INLINE(IEM_STATIC, void)
7629iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7630{
7631 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7632 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7633 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7634 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7635 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7636 iemFpuMaybePopOne(pFpuCtx);
7637}
7638
7639
7640DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7641{
7642 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7643 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7644 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7645 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7646 iemFpuMaybePopOne(pFpuCtx);
7647 iemFpuMaybePopOne(pFpuCtx);
7648}
7649
7650
7651DECL_NO_INLINE(IEM_STATIC, void)
7652iemFpuStackPushUnderflow(PVMCPU pVCpu)
7653{
7654 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7655 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7656 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7657
7658 if (pFpuCtx->FCW & X86_FCW_IM)
7659 {
7660 /* Masked underflow - Push QNaN. */
7661 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7662 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7663 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7664 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7665 pFpuCtx->FTW |= RT_BIT(iNewTop);
7666 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7667 iemFpuRotateStackPush(pFpuCtx);
7668 }
7669 else
7670 {
7671 /* Exception pending - don't change TOP or the register stack. */
7672 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7673 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7674 }
7675}
7676
7677
7678DECL_NO_INLINE(IEM_STATIC, void)
7679iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7680{
7681 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7682 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7683 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7684
7685 if (pFpuCtx->FCW & X86_FCW_IM)
7686 {
7687 /* Masked underflow - Push QNaN. */
7688 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7689 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7690 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7691 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7692 pFpuCtx->FTW |= RT_BIT(iNewTop);
7693 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7694 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7695 iemFpuRotateStackPush(pFpuCtx);
7696 }
7697 else
7698 {
7699 /* Exception pending - don't change TOP or the register stack. */
7700 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7701 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7702 }
7703}
7704
7705
7706/**
7707 * Worker routine for raising an FPU stack overflow exception on a push.
7708 *
7709 * @param pFpuCtx The FPU context.
7710 */
7711IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7712{
7713 if (pFpuCtx->FCW & X86_FCW_IM)
7714 {
7715 /* Masked overflow. */
7716 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7717 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7718 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7719 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7720 pFpuCtx->FTW |= RT_BIT(iNewTop);
7721 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7722 iemFpuRotateStackPush(pFpuCtx);
7723 }
7724 else
7725 {
7726 /* Exception pending - don't change TOP or the register stack. */
7727 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7728 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7729 }
7730}
7731
7732
7733/**
7734 * Raises a FPU stack overflow exception on a push.
7735 *
7736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7737 */
7738DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7739{
7740 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7741 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7742 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7743 iemFpuStackPushOverflowOnly(pFpuCtx);
7744}
7745
7746
7747/**
7748 * Raises a FPU stack overflow exception on a push with a memory operand.
7749 *
7750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7751 * @param iEffSeg The effective memory operand selector register.
7752 * @param GCPtrEff The effective memory operand offset.
7753 */
7754DECL_NO_INLINE(IEM_STATIC, void)
7755iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7756{
7757 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7758 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7759 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7760 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7761 iemFpuStackPushOverflowOnly(pFpuCtx);
7762}
7763
7764
7765IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7766{
7767 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7768 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7769 if (pFpuCtx->FTW & RT_BIT(iReg))
7770 return VINF_SUCCESS;
7771 return VERR_NOT_FOUND;
7772}
7773
7774
7775IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7776{
7777 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7778 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7779 if (pFpuCtx->FTW & RT_BIT(iReg))
7780 {
7781 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7782 return VINF_SUCCESS;
7783 }
7784 return VERR_NOT_FOUND;
7785}
7786
7787
7788IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7789 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7790{
7791 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7792 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7793 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7794 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7795 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7796 {
7797 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7798 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7799 return VINF_SUCCESS;
7800 }
7801 return VERR_NOT_FOUND;
7802}
7803
7804
7805IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7806{
7807 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7808 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7809 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7810 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7811 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7812 {
7813 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7814 return VINF_SUCCESS;
7815 }
7816 return VERR_NOT_FOUND;
7817}
7818
7819
7820/**
7821 * Updates the FPU exception status after FCW is changed.
7822 *
7823 * @param pFpuCtx The FPU context.
7824 */
7825IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7826{
7827 uint16_t u16Fsw = pFpuCtx->FSW;
7828 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7829 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7830 else
7831 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7832 pFpuCtx->FSW = u16Fsw;
7833}
7834
7835
7836/**
7837 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7838 *
7839 * @returns The full FTW.
7840 * @param pFpuCtx The FPU context.
7841 */
7842IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7843{
7844 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7845 uint16_t u16Ftw = 0;
7846 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7847 for (unsigned iSt = 0; iSt < 8; iSt++)
7848 {
7849 unsigned const iReg = (iSt + iTop) & 7;
7850 if (!(u8Ftw & RT_BIT(iReg)))
7851 u16Ftw |= 3 << (iReg * 2); /* empty */
7852 else
7853 {
7854 uint16_t uTag;
7855 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7856 if (pr80Reg->s.uExponent == 0x7fff)
7857 uTag = 2; /* Exponent is all 1's => Special. */
7858 else if (pr80Reg->s.uExponent == 0x0000)
7859 {
7860 if (pr80Reg->s.u64Mantissa == 0x0000)
7861 uTag = 1; /* All bits are zero => Zero. */
7862 else
7863 uTag = 2; /* Must be special. */
7864 }
7865 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7866 uTag = 0; /* Valid. */
7867 else
7868 uTag = 2; /* Must be special. */
7869
7870 u16Ftw |= uTag << (iReg * 2); /* valid (0), zero (1) or special (2) */
7871 }
7872 }
7873
7874 return u16Ftw;
7875}
7876
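/*
 * Example: with TOP=7 and only ST(0) holding 1.0, the compressed FTW is 0x80
 * (bit 7 set) and the full tag word computed above is 0x3fff: physical
 * register 7 gets tag 00 (valid, J bit set) and the remaining seven registers
 * get tag 11 (empty).
 */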
7877
7878/**
7879 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7880 *
7881 * @returns The compressed FTW.
7882 * @param u16FullFtw The full FTW to convert.
7883 */
7884IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7885{
7886 uint8_t u8Ftw = 0;
7887 for (unsigned i = 0; i < 8; i++)
7888 {
7889 if ((u16FullFtw & 3) != 3 /*empty*/)
7890 u8Ftw |= RT_BIT(i);
7891 u16FullFtw >>= 2;
7892 }
7893
7894 return u8Ftw;
7895}
7896
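/*
 * Continuing the example above: feeding 0x3fff back through this routine
 * yields 0x80 again, since any pair other than 11 (empty) sets the
 * corresponding bit. The distinction between valid, zero and special is
 * dropped here; it can be recomputed from the register contents via
 * iemFpuCalcFullFtw when needed.
 */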
7897/** @} */
7898
7899
7900/** @name Memory access.
7901 *
7902 * @{
7903 */
7904
7905
7906/**
7907 * Updates the IEMCPU::cbWritten counter if applicable.
7908 *
7909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7910 * @param fAccess The access being accounted for.
7911 * @param cbMem The access size.
7912 */
7913DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7914{
7915 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7916 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7917 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7918}
7919
7920
7921/**
7922 * Checks if the given segment can be written to, raising the appropriate
7923 * exception if not.
7924 *
7925 * @returns VBox strict status code.
7926 *
7927 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7928 * @param pHid Pointer to the hidden register.
7929 * @param iSegReg The register number.
7930 * @param pu64BaseAddr Where to return the base address to use for the
7931 * segment. (In 64-bit code it may differ from the
7932 * base in the hidden segment.)
7933 */
7934IEM_STATIC VBOXSTRICTRC
7935iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7936{
7937 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7938 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7939 else
7940 {
7941 if (!pHid->Attr.n.u1Present)
7942 {
7943 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7944 AssertRelease(uSel == 0);
7945 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7946 return iemRaiseGeneralProtectionFault0(pVCpu);
7947 }
7948
7949 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7950 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7951 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7952 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7953 *pu64BaseAddr = pHid->u64Base;
7954 }
7955 return VINF_SUCCESS;
7956}
7957
7958
7959/**
7960 * Checks if the given segment can be read from, raising the appropriate
7961 * exception if not.
7962 *
7963 * @returns VBox strict status code.
7964 *
7965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7966 * @param pHid Pointer to the hidden register.
7967 * @param iSegReg The register number.
7968 * @param pu64BaseAddr Where to return the base address to use for the
7969 * segment. (In 64-bit code it may differ from the
7970 * base in the hidden segment.)
7971 */
7972IEM_STATIC VBOXSTRICTRC
7973iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7974{
7975 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7976 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7977 else
7978 {
7979 if (!pHid->Attr.n.u1Present)
7980 {
7981 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7982 AssertRelease(uSel == 0);
7983 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7984 return iemRaiseGeneralProtectionFault0(pVCpu);
7985 }
7986
7987 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7988 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7989 *pu64BaseAddr = pHid->u64Base;
7990 }
7991 return VINF_SUCCESS;
7992}
7993
7994
7995/**
7996 * Applies the segment limit, base and attributes.
7997 *
7998 * This may raise a \#GP or \#SS.
7999 *
8000 * @returns VBox strict status code.
8001 *
8002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8003 * @param fAccess The kind of access which is being performed.
8004 * @param iSegReg The index of the segment register to apply.
8005 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8006 * TSS, ++).
8007 * @param cbMem The access size.
8008 * @param pGCPtrMem Pointer to the guest memory address to apply
8009 * segmentation to. Input and output parameter.
8010 */
8011IEM_STATIC VBOXSTRICTRC
8012iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8013{
8014 if (iSegReg == UINT8_MAX)
8015 return VINF_SUCCESS;
8016
8017 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8018 switch (pVCpu->iem.s.enmCpuMode)
8019 {
8020 case IEMMODE_16BIT:
8021 case IEMMODE_32BIT:
8022 {
8023 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8024 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8025
8026 if ( pSel->Attr.n.u1Present
8027 && !pSel->Attr.n.u1Unusable)
8028 {
8029 Assert(pSel->Attr.n.u1DescType);
8030 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8031 {
8032 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8033 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8034 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8035
8036 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8037 {
8038 /** @todo CPL check. */
8039 }
8040
8041 /*
8042 * There are two kinds of data selectors, normal and expand down.
8043 */
8044 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8045 {
8046 if ( GCPtrFirst32 > pSel->u32Limit
8047 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8048 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8049 }
8050 else
8051 {
8052 /*
8053 * The upper boundary is defined by the B bit, not the G bit!
8054 */
8055 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8056 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8057 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8058 }
8059 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8060 }
8061 else
8062 {
8063
8064 /*
8065 * A code selector can usually be used to read through; writing is
8066 * only permitted in real and V8086 mode.
8067 */
8068 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8069 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8070 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8071 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8072 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8073
8074 if ( GCPtrFirst32 > pSel->u32Limit
8075 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8076 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8077
8078 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8079 {
8080 /** @todo CPL check. */
8081 }
8082
8083 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8084 }
8085 }
8086 else
8087 return iemRaiseGeneralProtectionFault0(pVCpu);
8088 return VINF_SUCCESS;
8089 }
8090
8091 case IEMMODE_64BIT:
8092 {
8093 RTGCPTR GCPtrMem = *pGCPtrMem;
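            /* In 64-bit mode only the FS and GS bases apply; there is no limit check,
               the address merely has to be canonical. */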
8094 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8095 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8096
8097 Assert(cbMem >= 1);
8098 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8099 return VINF_SUCCESS;
8100 return iemRaiseGeneralProtectionFault0(pVCpu);
8101 }
8102
8103 default:
8104 AssertFailedReturn(VERR_IEM_IPE_7);
8105 }
8106}
8107
8108
8109/**
8110 * Translates a virtual address to a physical address and checks if we
8111 * can access the page as specified.
8112 *
8113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8114 * @param GCPtrMem The virtual address.
8115 * @param fAccess The intended access.
8116 * @param pGCPhysMem Where to return the physical address.
8117 */
8118IEM_STATIC VBOXSTRICTRC
8119iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8120{
8121 /** @todo Need a different PGM interface here. We're currently using
8122 * generic / REM interfaces. this won't cut it for R0 & RC. */
8123 RTGCPHYS GCPhys;
8124 uint64_t fFlags;
8125 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8126 if (RT_FAILURE(rc))
8127 {
8128 /** @todo Check unassigned memory in unpaged mode. */
8129 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8130 *pGCPhysMem = NIL_RTGCPHYS;
8131 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8132 }
8133
8134 /* If the page is writable and does not have the no-exec bit set, all
8135 access is allowed. Otherwise we'll have to check more carefully... */
8136 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8137 {
8138 /* Write to read only memory? */
8139 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8140 && !(fFlags & X86_PTE_RW)
8141 && ( (pVCpu->iem.s.uCpl == 3
8142 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8143 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8144 {
8145 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8146 *pGCPhysMem = NIL_RTGCPHYS;
8147 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8148 }
8149
8150 /* Kernel memory accessed by userland? */
8151 if ( !(fFlags & X86_PTE_US)
8152 && pVCpu->iem.s.uCpl == 3
8153 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8154 {
8155 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8156 *pGCPhysMem = NIL_RTGCPHYS;
8157 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8158 }
8159
8160 /* Executing non-executable memory? */
8161 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8162 && (fFlags & X86_PTE_PAE_NX)
8163 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8164 {
8165 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8166 *pGCPhysMem = NIL_RTGCPHYS;
8167 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8168 VERR_ACCESS_DENIED);
8169 }
8170 }
8171
8172 /*
8173 * Set the dirty / accessed flags.
8174 * ASSUMES this is set when the address is translated rather than on commit...
8175 */
8176 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8177 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8178 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8179 {
8180 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8181 AssertRC(rc2);
8182 }
8183
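    /* Combine the page frame address with the offset into the page. */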
8184 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8185 *pGCPhysMem = GCPhys;
8186 return VINF_SUCCESS;
8187}
8188
8189
8190
8191/**
8192 * Maps a physical page.
8193 *
8194 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8196 * @param GCPhysMem The physical address.
8197 * @param fAccess The intended access.
8198 * @param ppvMem Where to return the mapping address.
8199 * @param pLock The PGM lock.
8200 */
8201IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8202{
8203#ifdef IEM_VERIFICATION_MODE_FULL
8204 /* Force the alternative path so we can ignore writes. */
8205 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8206 {
8207 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8208 {
8209 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8210 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8211 if (RT_FAILURE(rc2))
8212 pVCpu->iem.s.fProblematicMemory = true;
8213 }
8214 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8215 }
8216#endif
8217#ifdef IEM_LOG_MEMORY_WRITES
8218 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8219 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8220#endif
8221#ifdef IEM_VERIFICATION_MODE_MINIMAL
8222 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8223#endif
8224
8225 /** @todo This API may require some improving later. A private deal with PGM
8226 * regarding locking and unlocking needs to be struck. A couple of TLBs
8227 * living in PGM, but with publicly accessible inlined access methods
8228 * could perhaps be an even better solution. */
8229 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8230 GCPhysMem,
8231 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8232 pVCpu->iem.s.fBypassHandlers,
8233 ppvMem,
8234 pLock);
8235 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8236 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8237
8238#ifdef IEM_VERIFICATION_MODE_FULL
8239 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8240 pVCpu->iem.s.fProblematicMemory = true;
8241#endif
8242 return rc;
8243}
8244
8245
8246/**
8247 * Unmap a page previously mapped by iemMemPageMap.
8248 *
8249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8250 * @param GCPhysMem The physical address.
8251 * @param fAccess The intended access.
8252 * @param pvMem What iemMemPageMap returned.
8253 * @param pLock The PGM lock.
8254 */
8255DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8256{
8257 NOREF(pVCpu);
8258 NOREF(GCPhysMem);
8259 NOREF(fAccess);
8260 NOREF(pvMem);
8261 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8262}
8263
8264
8265/**
8266 * Looks up a memory mapping entry.
8267 *
8268 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8270 * @param pvMem The memory address.
8271 * @param fAccess The access to.
8272 */
8273DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8274{
8275 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
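    /* Only the WHAT and TYPE bits are compared; other access flags may differ. */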
8276 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8277 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8278 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8279 return 0;
8280 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8281 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8282 return 1;
8283 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8284 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8285 return 2;
8286 return VERR_NOT_FOUND;
8287}
8288
8289
8290/**
8291 * Finds a free memmap entry when using iNextMapping doesn't work.
8292 *
8293 * @returns Memory mapping index, 1024 on failure.
8294 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8295 */
8296IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8297{
8298 /*
8299 * The easy case.
8300 */
8301 if (pVCpu->iem.s.cActiveMappings == 0)
8302 {
8303 pVCpu->iem.s.iNextMapping = 1;
8304 return 0;
8305 }
8306
8307 /* There should be enough mappings for all instructions. */
8308 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8309
8310 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8311 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8312 return i;
8313
8314 AssertFailedReturn(1024);
8315}
8316
8317
8318/**
8319 * Commits a bounce buffer that needs writing back and unmaps it.
8320 *
8321 * @returns Strict VBox status code.
8322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8323 * @param iMemMap The index of the buffer to commit.
8324 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8325 * Always false in ring-3, obviously.
8326 */
8327IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8328{
8329 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8330 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8331#ifdef IN_RING3
8332 Assert(!fPostponeFail);
8333 RT_NOREF_PV(fPostponeFail);
8334#endif
8335
8336 /*
8337 * Do the writing.
8338 */
8339#ifndef IEM_VERIFICATION_MODE_MINIMAL
8340 PVM pVM = pVCpu->CTX_SUFF(pVM);
8341 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8342 && !IEM_VERIFICATION_ENABLED(pVCpu))
8343 {
8344 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8345 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8346 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8347 if (!pVCpu->iem.s.fBypassHandlers)
8348 {
8349 /*
8350 * Carefully and efficiently dealing with access handler return
8351 * codes makes this a little bloated.
8352 */
8353 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8354 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8355 pbBuf,
8356 cbFirst,
8357 PGMACCESSORIGIN_IEM);
8358 if (rcStrict == VINF_SUCCESS)
8359 {
8360 if (cbSecond)
8361 {
8362 rcStrict = PGMPhysWrite(pVM,
8363 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8364 pbBuf + cbFirst,
8365 cbSecond,
8366 PGMACCESSORIGIN_IEM);
8367 if (rcStrict == VINF_SUCCESS)
8368 { /* nothing */ }
8369 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8370 {
8371 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8372 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8373 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8374 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8375 }
8376# ifndef IN_RING3
8377 else if (fPostponeFail)
8378 {
8379 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8380 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8381 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8382 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8383 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8384 return iemSetPassUpStatus(pVCpu, rcStrict);
8385 }
8386# endif
8387 else
8388 {
8389 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8390 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8391 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8392 return rcStrict;
8393 }
8394 }
8395 }
8396 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8397 {
8398 if (!cbSecond)
8399 {
8400 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8401 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8402 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8403 }
8404 else
8405 {
8406 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8407 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8408 pbBuf + cbFirst,
8409 cbSecond,
8410 PGMACCESSORIGIN_IEM);
8411 if (rcStrict2 == VINF_SUCCESS)
8412 {
8413 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8414 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8415 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8416 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8417 }
8418 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8419 {
8420 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8421 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8422 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8423 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8424 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8425 }
8426# ifndef IN_RING3
8427 else if (fPostponeFail)
8428 {
8429 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8430 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8431 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8432 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8433 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8434 return iemSetPassUpStatus(pVCpu, rcStrict);
8435 }
8436# endif
8437 else
8438 {
8439 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8440 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8441 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8442 return rcStrict2;
8443 }
8444 }
8445 }
8446# ifndef IN_RING3
8447 else if (fPostponeFail)
8448 {
8449 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8450 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8451 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8452 if (!cbSecond)
8453 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8454 else
8455 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8456 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8457 return iemSetPassUpStatus(pVCpu, rcStrict);
8458 }
8459# endif
8460 else
8461 {
8462 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8463 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8464 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8465 return rcStrict;
8466 }
8467 }
8468 else
8469 {
8470 /*
8471 * No access handlers, much simpler.
8472 */
8473 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8474 if (RT_SUCCESS(rc))
8475 {
8476 if (cbSecond)
8477 {
8478 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8479 if (RT_SUCCESS(rc))
8480 { /* likely */ }
8481 else
8482 {
8483 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8484 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8485 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8486 return rc;
8487 }
8488 }
8489 }
8490 else
8491 {
8492 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8493 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8494 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8495 return rc;
8496 }
8497 }
8498 }
8499#endif
8500
8501#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8502 /*
8503 * Record the write(s).
8504 */
8505 if (!pVCpu->iem.s.fNoRem)
8506 {
8507 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8508 if (pEvtRec)
8509 {
8510 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8511 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8512 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8513 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8514 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8515 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8516 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8517 }
8518 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8519 {
8520 pEvtRec = iemVerifyAllocRecord(pVCpu);
8521 if (pEvtRec)
8522 {
8523 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8524 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8525 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8526 memcpy(pEvtRec->u.RamWrite.ab,
8527 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8528 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8529 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8530 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8531 }
8532 }
8533 }
8534#endif
8535#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8536 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8537 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8538 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8539 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8540 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8541 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8542
8543 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8544 g_cbIemWrote = cbWrote;
8545 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8546#endif
8547
8548 /*
8549 * Free the mapping entry.
8550 */
8551 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8552 Assert(pVCpu->iem.s.cActiveMappings != 0);
8553 pVCpu->iem.s.cActiveMappings--;
8554 return VINF_SUCCESS;
8555}
8556
8557
8558/**
8559 * iemMemMap worker that deals with a request crossing pages.
8560 */
8561IEM_STATIC VBOXSTRICTRC
8562iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8563{
8564 /*
8565 * Do the address translations.
8566 */
8567 RTGCPHYS GCPhysFirst;
8568 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8569 if (rcStrict != VINF_SUCCESS)
8570 return rcStrict;
8571
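    /* Translate the start of the second page, i.e. the page holding the last byte of the access. */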
8572 RTGCPHYS GCPhysSecond;
8573 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8574 fAccess, &GCPhysSecond);
8575 if (rcStrict != VINF_SUCCESS)
8576 return rcStrict;
8577 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8578
8579 PVM pVM = pVCpu->CTX_SUFF(pVM);
8580#ifdef IEM_VERIFICATION_MODE_FULL
8581 /*
8582 * Detect problematic memory when verifying so we can select
8583 * the right execution engine. (TLB: Redo this.)
8584 */
8585 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8586 {
8587 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8588 if (RT_SUCCESS(rc2))
8589 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8590 if (RT_FAILURE(rc2))
8591 pVCpu->iem.s.fProblematicMemory = true;
8592 }
8593#endif
8594
8595
8596 /*
8597 * Read in the current memory content if it's a read, execute or partial
8598 * write access.
8599 */
8600 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8601 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8602 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8603
8604 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8605 {
8606 if (!pVCpu->iem.s.fBypassHandlers)
8607 {
8608 /*
8609 * Must carefully deal with access handler status codes here,
8610 * makes the code a bit bloated.
8611 */
8612 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8613 if (rcStrict == VINF_SUCCESS)
8614 {
8615 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8616 if (rcStrict == VINF_SUCCESS)
8617 { /*likely */ }
8618 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8619 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8620 else
8621 {
8622 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8623 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8624 return rcStrict;
8625 }
8626 }
8627 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8628 {
8629 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8630 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8631 {
8632 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8633 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8634 }
8635 else
8636 {
8637 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8638 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8639 return rcStrict2;
8640 }
8641 }
8642 else
8643 {
8644 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8645 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8646 return rcStrict;
8647 }
8648 }
8649 else
8650 {
8651 /*
8652 * No informational status codes here, much more straightforward.
8653 */
8654 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8655 if (RT_SUCCESS(rc))
8656 {
8657 Assert(rc == VINF_SUCCESS);
8658 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8659 if (RT_SUCCESS(rc))
8660 Assert(rc == VINF_SUCCESS);
8661 else
8662 {
8663 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8664 return rc;
8665 }
8666 }
8667 else
8668 {
8669 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8670 return rc;
8671 }
8672 }
8673
8674#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8675 if ( !pVCpu->iem.s.fNoRem
8676 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8677 {
8678 /*
8679 * Record the reads.
8680 */
8681 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8682 if (pEvtRec)
8683 {
8684 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8685 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8686 pEvtRec->u.RamRead.cb = cbFirstPage;
8687 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8688 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8689 }
8690 pEvtRec = iemVerifyAllocRecord(pVCpu);
8691 if (pEvtRec)
8692 {
8693 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8694 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8695 pEvtRec->u.RamRead.cb = cbSecondPage;
8696 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8697 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8698 }
8699 }
8700#endif
8701 }
8702#ifdef VBOX_STRICT
8703 else
8704 memset(pbBuf, 0xcc, cbMem);
8705 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8706 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8707#endif
8708
8709 /*
8710 * Commit the bounce buffer entry.
8711 */
8712 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8713 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8714 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8715 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8716 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8717 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8718 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8719 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8720 pVCpu->iem.s.cActiveMappings++;
8721
8722 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8723 *ppvMem = pbBuf;
8724 return VINF_SUCCESS;
8725}
8726
8727
8728/**
8729 * iemMemMap worker that deals with iemMemPageMap failures.
8730 */
8731IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8732 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8733{
8734 /*
8735 * Filter out conditions we can handle and the ones which shouldn't happen.
8736 */
8737 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8738 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8739 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8740 {
8741 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8742 return rcMap;
8743 }
8744 pVCpu->iem.s.cPotentialExits++;
8745
8746 /*
8747 * Read in the current memory content if it's a read, execute or partial
8748 * write access.
8749 */
8750 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8751 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8752 {
8753 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8754 memset(pbBuf, 0xff, cbMem);
8755 else
8756 {
8757 int rc;
8758 if (!pVCpu->iem.s.fBypassHandlers)
8759 {
8760 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8761 if (rcStrict == VINF_SUCCESS)
8762 { /* nothing */ }
8763 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8764 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8765 else
8766 {
8767 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8768 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8769 return rcStrict;
8770 }
8771 }
8772 else
8773 {
8774 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8775 if (RT_SUCCESS(rc))
8776 { /* likely */ }
8777 else
8778 {
8779 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8780 GCPhysFirst, rc));
8781 return rc;
8782 }
8783 }
8784 }
8785
8786#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8787 if ( !pVCpu->iem.s.fNoRem
8788 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8789 {
8790 /*
8791 * Record the read.
8792 */
8793 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8794 if (pEvtRec)
8795 {
8796 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8797 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8798 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8799 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8800 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8801 }
8802 }
8803#endif
8804 }
8805#ifdef VBOX_STRICT
8806 else
8807 memset(pbBuf, 0xcc, cbMem);
8808#endif
8809#ifdef VBOX_STRICT
8810 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8811 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8812#endif
8813
8814 /*
8815 * Commit the bounce buffer entry.
8816 */
8817 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8818 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8819 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8820 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8821 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8822 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8823 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8824 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8825 pVCpu->iem.s.cActiveMappings++;
8826
8827 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8828 *ppvMem = pbBuf;
8829 return VINF_SUCCESS;
8830}
8831
8832
8833
8834/**
8835 * Maps the specified guest memory for the given kind of access.
8836 *
8837 * This may be using bounce buffering of the memory if it's crossing a page
8838 * boundary or if there is an access handler installed for any of it. Because
8839 * of lock prefix guarantees, we're in for some extra clutter when this
8840 * happens.
8841 *
8842 * This may raise a \#GP, \#SS, \#PF or \#AC.
8843 *
8844 * @returns VBox strict status code.
8845 *
8846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8847 * @param ppvMem Where to return the pointer to the mapped
8848 * memory.
8849 * @param cbMem The number of bytes to map. This is usually 1,
8850 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8851 * string operations it can be up to a page.
8852 * @param iSegReg The index of the segment register to use for
8853 * this access. The base and limits are checked.
8854 * Use UINT8_MAX to indicate that no segmentation
8855 * is required (for IDT, GDT and LDT accesses).
8856 * @param GCPtrMem The address of the guest memory.
8857 * @param fAccess How the memory is being accessed. The
8858 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8859 * how to map the memory, while the
8860 * IEM_ACCESS_WHAT_XXX bit is used when raising
8861 * exceptions.
8862 */
8863IEM_STATIC VBOXSTRICTRC
8864iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8865{
8866 /*
8867 * Check the input and figure out which mapping entry to use.
8868 */
8869 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8870 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8871 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8872
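    /* Use the next mapping entry if it is free, otherwise scan for a free one. */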
8873 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8874 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8875 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8876 {
8877 iMemMap = iemMemMapFindFree(pVCpu);
8878 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8879 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8880 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8881 pVCpu->iem.s.aMemMappings[2].fAccess),
8882 VERR_IEM_IPE_9);
8883 }
8884
8885 /*
8886 * Map the memory, checking that we can actually access it. If something
8887 * slightly complicated happens, fall back on bounce buffering.
8888 */
8889 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8890 if (rcStrict != VINF_SUCCESS)
8891 return rcStrict;
8892
8893 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8894 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8895
8896 RTGCPHYS GCPhysFirst;
8897 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8898 if (rcStrict != VINF_SUCCESS)
8899 return rcStrict;
8900
8901 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8902 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8903 if (fAccess & IEM_ACCESS_TYPE_READ)
8904 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8905
8906 void *pvMem;
8907 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8908 if (rcStrict != VINF_SUCCESS)
8909 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8910
8911 /*
8912 * Fill in the mapping table entry.
8913 */
8914 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8915 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8916 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8917 pVCpu->iem.s.cActiveMappings++;
8918
8919 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8920 *ppvMem = pvMem;
8921 return VINF_SUCCESS;
8922}
8923
8924
8925/**
8926 * Commits the guest memory if bounce buffered and unmaps it.
8927 *
8928 * @returns Strict VBox status code.
8929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8930 * @param pvMem The mapping.
8931 * @param fAccess The kind of access.
8932 */
8933IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8934{
8935 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8936 AssertReturn(iMemMap >= 0, iMemMap);
8937
8938 /* If it's bounce buffered, we may need to write back the buffer. */
8939 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8940 {
8941 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8942 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8943 }
8944 /* Otherwise unlock it. */
8945 else
8946 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8947
8948 /* Free the entry. */
8949 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8950 Assert(pVCpu->iem.s.cActiveMappings != 0);
8951 pVCpu->iem.s.cActiveMappings--;
8952 return VINF_SUCCESS;
8953}
8954
8955#ifdef IEM_WITH_SETJMP
8956
8957/**
8958 * Maps the specified guest memory for the given kind of access, longjmp on
8959 * error.
8960 *
8961 * This may be using bounce buffering of the memory if it's crossing a page
8962 * boundary or if there is an access handler installed for any of it. Because
8963 * of lock prefix guarantees, we're in for some extra clutter when this
8964 * happens.
8965 *
8966 * This may raise a \#GP, \#SS, \#PF or \#AC.
8967 *
8968 * @returns Pointer to the mapped memory.
8969 *
8970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8971 * @param cbMem The number of bytes to map. This is usually 1,
8972 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8973 * string operations it can be up to a page.
8974 * @param iSegReg The index of the segment register to use for
8975 * this access. The base and limits are checked.
8976 * Use UINT8_MAX to indicate that no segmentation
8977 * is required (for IDT, GDT and LDT accesses).
8978 * @param GCPtrMem The address of the guest memory.
8979 * @param fAccess How the memory is being accessed. The
8980 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8981 * how to map the memory, while the
8982 * IEM_ACCESS_WHAT_XXX bit is used when raising
8983 * exceptions.
8984 */
8985IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8986{
8987 /*
8988 * Check the input and figure out which mapping entry to use.
8989 */
8990 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8991 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8992 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8993
8994 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8995 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8996 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8997 {
8998 iMemMap = iemMemMapFindFree(pVCpu);
8999 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
9000 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
9001 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
9002 pVCpu->iem.s.aMemMappings[2].fAccess),
9003 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
9004 }
9005
9006 /*
9007 * Map the memory, checking that we can actually access it. If something
9008 * slightly complicated happens, fall back on bounce buffering.
9009 */
9010 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9011 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9012 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9013
9014 /* Crossing a page boundary? */
9015 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
9016 { /* No (likely). */ }
9017 else
9018 {
9019 void *pvMem;
9020 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9021 if (rcStrict == VINF_SUCCESS)
9022 return pvMem;
9023 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9024 }
9025
9026 RTGCPHYS GCPhysFirst;
9027 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9028 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9029 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9030
9031 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9032 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9033 if (fAccess & IEM_ACCESS_TYPE_READ)
9034 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9035
9036 void *pvMem;
9037 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9038 if (rcStrict == VINF_SUCCESS)
9039 { /* likely */ }
9040 else
9041 {
9042 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9043 if (rcStrict == VINF_SUCCESS)
9044 return pvMem;
9045 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9046 }
9047
9048 /*
9049 * Fill in the mapping table entry.
9050 */
9051 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9052 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9053 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9054 pVCpu->iem.s.cActiveMappings++;
9055
9056 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9057 return pvMem;
9058}
9059
9060
9061/**
9062 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9063 *
9064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9065 * @param pvMem The mapping.
9066 * @param fAccess The kind of access.
9067 */
9068IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9069{
9070 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9071 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9072
9073 /* If it's bounce buffered, we may need to write back the buffer. */
9074 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9075 {
9076 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9077 {
9078 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9079 if (rcStrict == VINF_SUCCESS)
9080 return;
9081 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9082 }
9083 }
9084 /* Otherwise unlock it. */
9085 else
9086 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9087
9088 /* Free the entry. */
9089 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9090 Assert(pVCpu->iem.s.cActiveMappings != 0);
9091 pVCpu->iem.s.cActiveMappings--;
9092}
9093
9094#endif
9095
9096#ifndef IN_RING3
9097/**
9098 * Commits the guest memory if bounce buffered and unmaps it, if any bounce
9099 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
9100 *
9101 * Allows the instruction to be completed and retired, while the IEM user will
9102 * return to ring-3 immediately afterwards and do the postponed writes there.
9103 *
9104 * @returns VBox status code (no strict statuses). Caller must check
9105 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9107 * @param pvMem The mapping.
9108 * @param fAccess The kind of access.
9109 */
9110IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9111{
9112 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9113 AssertReturn(iMemMap >= 0, iMemMap);
9114
9115 /* If it's bounce buffered, we may need to write back the buffer. */
9116 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9117 {
9118 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9119 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9120 }
9121 /* Otherwise unlock it. */
9122 else
9123 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9124
9125 /* Free the entry. */
9126 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9127 Assert(pVCpu->iem.s.cActiveMappings != 0);
9128 pVCpu->iem.s.cActiveMappings--;
9129 return VINF_SUCCESS;
9130}
9131#endif
9132
9133
9134/**
9135 * Rolls back mappings, releasing page locks and such.
9136 *
9137 * The caller shall only call this after checking cActiveMappings.
9138 *
9140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9141 */
9142IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9143{
9144 Assert(pVCpu->iem.s.cActiveMappings > 0);
9145
9146 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9147 while (iMemMap-- > 0)
9148 {
9149 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9150 if (fAccess != IEM_ACCESS_INVALID)
9151 {
9152 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9153 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9154 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9155 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9156 Assert(pVCpu->iem.s.cActiveMappings > 0);
9157 pVCpu->iem.s.cActiveMappings--;
9158 }
9159 }
9160}
9161
9162
9163/**
9164 * Fetches a data byte.
9165 *
9166 * @returns Strict VBox status code.
9167 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9168 * @param pu8Dst Where to return the byte.
9169 * @param iSegReg The index of the segment register to use for
9170 * this access. The base and limits are checked.
9171 * @param GCPtrMem The address of the guest memory.
9172 */
9173IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9174{
9175 /* The lazy approach for now... */
9176 uint8_t const *pu8Src;
9177 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9178 if (rc == VINF_SUCCESS)
9179 {
9180 *pu8Dst = *pu8Src;
9181 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9182 }
9183 return rc;
9184}
9185
9186
9187#ifdef IEM_WITH_SETJMP
9188/**
9189 * Fetches a data byte, longjmp on error.
9190 *
9191 * @returns The byte.
9192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9193 * @param iSegReg The index of the segment register to use for
9194 * this access. The base and limits are checked.
9195 * @param GCPtrMem The address of the guest memory.
9196 */
9197DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9198{
9199 /* The lazy approach for now... */
9200 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9201 uint8_t const bRet = *pu8Src;
9202 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9203 return bRet;
9204}
9205#endif /* IEM_WITH_SETJMP */
9206
9207
9208/**
9209 * Fetches a data word.
9210 *
9211 * @returns Strict VBox status code.
9212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9213 * @param pu16Dst Where to return the word.
9214 * @param iSegReg The index of the segment register to use for
9215 * this access. The base and limits are checked.
9216 * @param GCPtrMem The address of the guest memory.
9217 */
9218IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9219{
9220 /* The lazy approach for now... */
9221 uint16_t const *pu16Src;
9222 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9223 if (rc == VINF_SUCCESS)
9224 {
9225 *pu16Dst = *pu16Src;
9226 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9227 }
9228 return rc;
9229}
9230
9231
9232#ifdef IEM_WITH_SETJMP
9233/**
9234 * Fetches a data word, longjmp on error.
9235 *
9236 * @returns The word
9237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9238 * @param iSegReg The index of the segment register to use for
9239 * this access. The base and limits are checked.
9240 * @param GCPtrMem The address of the guest memory.
9241 */
9242DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9243{
9244 /* The lazy approach for now... */
9245 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9246 uint16_t const u16Ret = *pu16Src;
9247 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9248 return u16Ret;
9249}
9250#endif
9251
9252
9253/**
9254 * Fetches a data dword.
9255 *
9256 * @returns Strict VBox status code.
9257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9258 * @param pu32Dst Where to return the dword.
9259 * @param iSegReg The index of the segment register to use for
9260 * this access. The base and limits are checked.
9261 * @param GCPtrMem The address of the guest memory.
9262 */
9263IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9264{
9265 /* The lazy approach for now... */
9266 uint32_t const *pu32Src;
9267 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9268 if (rc == VINF_SUCCESS)
9269 {
9270 *pu32Dst = *pu32Src;
9271 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9272 }
9273 return rc;
9274}
9275
9276
9277#ifdef IEM_WITH_SETJMP
9278
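/**
 * Applies segmentation to an address for a read access, longjmp on error.
 *
 * @returns The address with the segment base applied.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   cbMem       The number of bytes to access.
 * @param   GCPtrMem    The address of the guest memory.
 */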
9279IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9280{
9281 Assert(cbMem >= 1);
9282 Assert(iSegReg < X86_SREG_COUNT);
9283
9284 /*
9285 * 64-bit mode is simpler.
9286 */
9287 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9288 {
9289 if (iSegReg >= X86_SREG_FS)
9290 {
9291 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9292 GCPtrMem += pSel->u64Base;
9293 }
9294
9295 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9296 return GCPtrMem;
9297 }
9298 /*
9299 * 16-bit and 32-bit segmentation.
9300 */
9301 else
9302 {
9303 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9304 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9305 == X86DESCATTR_P /* data, expand up */
9306 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9307 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9308 {
9309 /* expand up */
9310 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9311 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9312 && GCPtrLast32 > (uint32_t)GCPtrMem))
9313 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9314 }
9315 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9316 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9317 {
9318 /* expand down */
9319 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9320 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9321 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9322 && GCPtrLast32 > (uint32_t)GCPtrMem))
9323 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9324 }
9325 else
9326 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9327 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9328 }
9329 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9330}
9331
9332
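/**
 * Applies segmentation to an address for a write access, longjmp on error.
 *
 * @returns The address with the segment base applied.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   cbMem       The number of bytes to access.
 * @param   GCPtrMem    The address of the guest memory.
 */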
9333IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9334{
9335 Assert(cbMem >= 1);
9336 Assert(iSegReg < X86_SREG_COUNT);
9337
9338 /*
9339 * 64-bit mode is simpler.
9340 */
9341 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9342 {
9343 if (iSegReg >= X86_SREG_FS)
9344 {
9345 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9346 GCPtrMem += pSel->u64Base;
9347 }
9348
9349 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9350 return GCPtrMem;
9351 }
9352 /*
9353 * 16-bit and 32-bit segmentation.
9354 */
9355 else
9356 {
9357 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9358 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9359 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9360 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9361 {
9362 /* expand up */
9363 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9364 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9365 && GCPtrLast32 > (uint32_t)GCPtrMem))
9366 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9367 }
9368 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9369 {
9370 /* expand down */
9371 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9372 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9373 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9374 && GCPtrLast32 > (uint32_t)GCPtrMem))
9375 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9376 }
9377 else
9378 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9379 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9380 }
9381 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9382}
9383
9384
9385/**
9386 * Fetches a data dword, longjmp on error, fallback/safe version.
9387 *
9388 * @returns The dword
9389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9390 * @param iSegReg The index of the segment register to use for
9391 * this access. The base and limits are checked.
9392 * @param GCPtrMem The address of the guest memory.
9393 */
9394IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9395{
9396 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9397 uint32_t const u32Ret = *pu32Src;
9398 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9399 return u32Ret;
9400}
9401
9402
9403/**
9404 * Fetches a data dword, longjmp on error.
9405 *
9406 * @returns The dword
9407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9408 * @param iSegReg The index of the segment register to use for
9409 * this access. The base and limits are checked.
9410 * @param GCPtrMem The address of the guest memory.
9411 */
9412DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9413{
9414# ifdef IEM_WITH_DATA_TLB
9415 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9416 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9417 {
9418 /// @todo more later.
9419 }
9420
9421 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9422# else
9423 /* The lazy approach. */
9424 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9425 uint32_t const u32Ret = *pu32Src;
9426 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9427 return u32Ret;
9428# endif
9429}
9430#endif
9431
9432
9433#ifdef SOME_UNUSED_FUNCTION
9434/**
9435 * Fetches a data dword and sign extends it to a qword.
9436 *
9437 * @returns Strict VBox status code.
9438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9439 * @param pu64Dst Where to return the sign extended value.
9440 * @param iSegReg The index of the segment register to use for
9441 * this access. The base and limits are checked.
9442 * @param GCPtrMem The address of the guest memory.
9443 */
9444IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9445{
9446 /* The lazy approach for now... */
9447 int32_t const *pi32Src;
9448 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9449 if (rc == VINF_SUCCESS)
9450 {
9451 *pu64Dst = *pi32Src;
9452 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9453 }
9454#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9455 else
9456 *pu64Dst = 0;
9457#endif
9458 return rc;
9459}
9460#endif
9461
9462
9463/**
9464 * Fetches a data qword.
9465 *
9466 * @returns Strict VBox status code.
9467 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9468 * @param pu64Dst Where to return the qword.
9469 * @param iSegReg The index of the segment register to use for
9470 * this access. The base and limits are checked.
9471 * @param GCPtrMem The address of the guest memory.
9472 */
9473IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9474{
9475 /* The lazy approach for now... */
9476 uint64_t const *pu64Src;
9477 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9478 if (rc == VINF_SUCCESS)
9479 {
9480 *pu64Dst = *pu64Src;
9481 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9482 }
9483 return rc;
9484}
9485
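/*
 * A minimal caller sketch for the strict-status fetch/store helpers in this
 * file (hypothetical function, segment index and effective address - not part
 * of the original source): any non-VINF_SUCCESS status must be passed up
 * unchanged so that faults and informational statuses from access handlers
 * reach the execution loop.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC iemExampleFetchQword(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint64_t *puValue)
{
    VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, puValue, iEffSeg, GCPtrEff);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;    /* #GP/#SS/#PF or a handler status - do not continue. */
    /* ... use *puValue ... */
    return VINF_SUCCESS;
}
#endif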
9486
9487#ifdef IEM_WITH_SETJMP
9488/**
9489 * Fetches a data qword, longjmp on error.
9490 *
9491 * @returns The qword.
9492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9493 * @param iSegReg The index of the segment register to use for
9494 * this access. The base and limits are checked.
9495 * @param GCPtrMem The address of the guest memory.
9496 */
9497DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9498{
9499 /* The lazy approach for now... */
9500 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9501 uint64_t const u64Ret = *pu64Src;
9502 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9503 return u64Ret;
9504}
9505#endif
9506
9507
9508/**
9509 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9510 *
9511 * @returns Strict VBox status code.
9512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9513 * @param pu64Dst Where to return the qword.
9514 * @param iSegReg The index of the segment register to use for
9515 * this access. The base and limits are checked.
9516 * @param GCPtrMem The address of the guest memory.
9517 */
9518IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9519{
9520 /* The lazy approach for now... */
9521 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9522 if (RT_UNLIKELY(GCPtrMem & 15))
9523 return iemRaiseGeneralProtectionFault0(pVCpu);
9524
9525 uint64_t const *pu64Src;
9526 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9527 if (rc == VINF_SUCCESS)
9528 {
9529 *pu64Dst = *pu64Src;
9530 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9531 }
9532 return rc;
9533}
9534
9535
9536#ifdef IEM_WITH_SETJMP
9537/**
9538 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9539 *
9540 * @returns The qword.
9541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9542 * @param iSegReg The index of the segment register to use for
9543 * this access. The base and limits are checked.
9544 * @param GCPtrMem The address of the guest memory.
9545 */
9546DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9547{
9548 /* The lazy approach for now... */
9549 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9550 if (RT_LIKELY(!(GCPtrMem & 15)))
9551 {
9552 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9553 uint64_t const u64Ret = *pu64Src;
9554 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9555 return u64Ret;
9556 }
9557
9558 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9559 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9560}
9561#endif
9562
9563
9564/**
9565 * Fetches a data tword.
9566 *
9567 * @returns Strict VBox status code.
9568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9569 * @param pr80Dst Where to return the tword.
9570 * @param iSegReg The index of the segment register to use for
9571 * this access. The base and limits are checked.
9572 * @param GCPtrMem The address of the guest memory.
9573 */
9574IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9575{
9576 /* The lazy approach for now... */
9577 PCRTFLOAT80U pr80Src;
9578 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9579 if (rc == VINF_SUCCESS)
9580 {
9581 *pr80Dst = *pr80Src;
9582 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9583 }
9584 return rc;
9585}
9586
9587
9588#ifdef IEM_WITH_SETJMP
9589/**
9590 * Fetches a data tword, longjmp on error.
9591 *
9592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9593 * @param pr80Dst Where to return the tword.
9594 * @param iSegReg The index of the segment register to use for
9595 * this access. The base and limits are checked.
9596 * @param GCPtrMem The address of the guest memory.
9597 */
9598DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9599{
9600 /* The lazy approach for now... */
9601 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9602 *pr80Dst = *pr80Src;
9603 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9604}
9605#endif
9606
9607
9608/**
9609 * Fetches a data dqword (double qword), generally SSE related.
9610 *
9611 * @returns Strict VBox status code.
9612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9613 * @param pu128Dst Where to return the dqword.
9614 * @param iSegReg The index of the segment register to use for
9615 * this access. The base and limits are checked.
9616 * @param GCPtrMem The address of the guest memory.
9617 */
9618IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9619{
9620 /* The lazy approach for now... */
9621 PCRTUINT128U pu128Src;
9622 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9623 if (rc == VINF_SUCCESS)
9624 {
9625 pu128Dst->au64[0] = pu128Src->au64[0];
9626 pu128Dst->au64[1] = pu128Src->au64[1];
9627 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9628 }
9629 return rc;
9630}
9631
9632
9633#ifdef IEM_WITH_SETJMP
9634/**
9635 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9636 *
9637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9638 * @param pu128Dst Where to return the dqword.
9639 * @param iSegReg The index of the segment register to use for
9640 * this access. The base and limits are checked.
9641 * @param GCPtrMem The address of the guest memory.
9642 */
9643IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9644{
9645 /* The lazy approach for now... */
9646 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9647 pu128Dst->au64[0] = pu128Src->au64[0];
9648 pu128Dst->au64[1] = pu128Src->au64[1];
9649 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9650}
9651#endif
9652
9653
9654/**
9655 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9656 * related.
9657 *
9658 * Raises \#GP(0) if not aligned.
9659 *
9660 * @returns Strict VBox status code.
9661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9662 * @param pu128Dst Where to return the dqword.
9663 * @param iSegReg The index of the segment register to use for
9664 * this access. The base and limits are checked.
9665 * @param GCPtrMem The address of the guest memory.
9666 */
9667IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9668{
9669 /* The lazy approach for now... */
9670 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9671 if ( (GCPtrMem & 15)
9672 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9673 return iemRaiseGeneralProtectionFault0(pVCpu);
9674
9675 PCRTUINT128U pu128Src;
9676 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9677 if (rc == VINF_SUCCESS)
9678 {
9679 pu128Dst->au64[0] = pu128Src->au64[0];
9680 pu128Dst->au64[1] = pu128Src->au64[1];
9681 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9682 }
9683 return rc;
9684}
9685
9686
9687#ifdef IEM_WITH_SETJMP
9688/**
9689 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9690 * related, longjmp on error.
9691 *
9692 * Raises \#GP(0) if not aligned.
9693 *
9694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9695 * @param pu128Dst Where to return the dqword.
9696 * @param iSegReg The index of the segment register to use for
9697 * this access. The base and limits are checked.
9698 * @param GCPtrMem The address of the guest memory.
9699 */
9700DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9701{
9702 /* The lazy approach for now... */
9703 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9704 if ( (GCPtrMem & 15) == 0
9705 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9706 {
9707 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9708 pu128Dst->au64[0] = pu128Src->au64[0];
9709 pu128Dst->au64[1] = pu128Src->au64[1];
9710 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9711 return;
9712 }
9713
9714 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9715 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9716}
9717#endif
9718
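/*
 * Alignment check illustration for the SSE aligned fetches above (illustrative
 * addresses, not from any testcase): GCPtrMem=0x00001230 gives (0x1230 & 15) == 0
 * and is mapped normally, while GCPtrMem=0x00001238 gives 8 and raises #GP(0) -
 * unless MXCSR.MM is set, which (on CPUs with the misaligned-SSE feature) lets
 * the access fall through to the regular mapping path.
 */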
9719
9720/**
9721 * Fetches a data oword (octo word), generally AVX related.
9722 *
9723 * @returns Strict VBox status code.
9724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9725 * @param pu256Dst Where to return the oword.
9726 * @param iSegReg The index of the segment register to use for
9727 * this access. The base and limits are checked.
9728 * @param GCPtrMem The address of the guest memory.
9729 */
9730IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9731{
9732 /* The lazy approach for now... */
9733 PCRTUINT256U pu256Src;
9734 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9735 if (rc == VINF_SUCCESS)
9736 {
9737 pu256Dst->au64[0] = pu256Src->au64[0];
9738 pu256Dst->au64[1] = pu256Src->au64[1];
9739 pu256Dst->au64[2] = pu256Src->au64[2];
9740 pu256Dst->au64[3] = pu256Src->au64[3];
9741 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9742 }
9743 return rc;
9744}
9745
9746
9747#ifdef IEM_WITH_SETJMP
9748/**
9749 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9750 *
9751 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9752 * @param pu256Dst Where to return the oword.
9753 * @param iSegReg The index of the segment register to use for
9754 * this access. The base and limits are checked.
9755 * @param GCPtrMem The address of the guest memory.
9756 */
9757IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9758{
9759 /* The lazy approach for now... */
9760 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9761 pu256Dst->au64[0] = pu256Src->au64[0];
9762 pu256Dst->au64[1] = pu256Src->au64[1];
9763 pu256Dst->au64[2] = pu256Src->au64[2];
9764 pu256Dst->au64[3] = pu256Src->au64[3];
9765 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9766}
9767#endif
9768
9769
9770/**
9771 * Fetches a data oword (octo word) at an aligned address, generally AVX
9772 * related.
9773 *
9774 * Raises \#GP(0) if not aligned.
9775 *
9776 * @returns Strict VBox status code.
9777 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9778 * @param pu256Dst Where to return the oword.
9779 * @param iSegReg The index of the segment register to use for
9780 * this access. The base and limits are checked.
9781 * @param GCPtrMem The address of the guest memory.
9782 */
9783IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9784{
9785 /* The lazy approach for now... */
9786 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9787 if (GCPtrMem & 31)
9788 return iemRaiseGeneralProtectionFault0(pVCpu);
9789
9790 PCRTUINT256U pu256Src;
9791 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9792 if (rc == VINF_SUCCESS)
9793 {
9794 pu256Dst->au64[0] = pu256Src->au64[0];
9795 pu256Dst->au64[1] = pu256Src->au64[1];
9796 pu256Dst->au64[2] = pu256Src->au64[2];
9797 pu256Dst->au64[3] = pu256Src->au64[3];
9798 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9799 }
9800 return rc;
9801}
9802
9803
9804#ifdef IEM_WITH_SETJMP
9805/**
9806 * Fetches a data oword (octo word) at an aligned address, generally AVX
9807 * related, longjmp on error.
9808 *
9809 * Raises \#GP(0) if not aligned.
9810 *
9811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9812 * @param pu256Dst Where to return the oword.
9813 * @param iSegReg The index of the segment register to use for
9814 * this access. The base and limits are checked.
9815 * @param GCPtrMem The address of the guest memory.
9816 */
9817DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9818{
9819 /* The lazy approach for now... */
9820 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9821 if ((GCPtrMem & 31) == 0)
9822 {
9823 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9824 pu256Dst->au64[0] = pu256Src->au64[0];
9825 pu256Dst->au64[1] = pu256Src->au64[1];
9826 pu256Dst->au64[2] = pu256Src->au64[2];
9827 pu256Dst->au64[3] = pu256Src->au64[3];
9828 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9829 return;
9830 }
9831
9832 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9833 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9834}
9835#endif
9836
9837
9838
9839/**
9840 * Fetches a descriptor register (lgdt, lidt).
9841 *
9842 * @returns Strict VBox status code.
9843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9844 * @param pcbLimit Where to return the limit.
9845 * @param pGCPtrBase Where to return the base.
9846 * @param iSegReg The index of the segment register to use for
9847 * this access. The base and limits are checked.
9848 * @param GCPtrMem The address of the guest memory.
9849 * @param enmOpSize The effective operand size.
9850 */
9851IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9852 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9853{
9854 /*
9855 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9856 * little special:
9857 * - The two reads are done separately.
9858 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9859 * - We suspect the 386 to actually commit the limit before the base in
9860 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9861 * don't try to emulate this eccentric behavior, because it's not well
9862 * enough understood and rather hard to trigger.
9863 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9864 */
9865 VBOXSTRICTRC rcStrict;
9866 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9867 {
9868 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9869 if (rcStrict == VINF_SUCCESS)
9870 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9871 }
9872 else
9873 {
9874 uint32_t uTmp = 0; /* (Initialized to silence a Visual C++ 'maybe used uninitialized' warning.) */
9875 if (enmOpSize == IEMMODE_32BIT)
9876 {
9877 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9878 {
9879 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9880 if (rcStrict == VINF_SUCCESS)
9881 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9882 }
9883 else
9884 {
9885 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9886 if (rcStrict == VINF_SUCCESS)
9887 {
9888 *pcbLimit = (uint16_t)uTmp;
9889 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9890 }
9891 }
9892 if (rcStrict == VINF_SUCCESS)
9893 *pGCPtrBase = uTmp;
9894 }
9895 else
9896 {
9897 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9898 if (rcStrict == VINF_SUCCESS)
9899 {
9900 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9901 if (rcStrict == VINF_SUCCESS)
9902 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9903 }
9904 }
9905 }
9906 return rcStrict;
9907}
9908
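/*
 * A worked example of the 16-bit operand size case above (illustrative values):
 * lgdt with a 16-bit operand size reading the limit word 0x03ff and the base
 * dword 0x89abcdef returns *pcbLimit=0x03ff and *pGCPtrBase=0x00abcdef, since
 * only 24 bits of the base are used; with a 32-bit operand size the full
 * 0x89abcdef base is returned (and on a 486 the limit is read as a dword and
 * truncated to 16 bits).
 */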
9909
9910
9911/**
9912 * Stores a data byte.
9913 *
9914 * @returns Strict VBox status code.
9915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9916 * @param iSegReg The index of the segment register to use for
9917 * this access. The base and limits are checked.
9918 * @param GCPtrMem The address of the guest memory.
9919 * @param u8Value The value to store.
9920 */
9921IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9922{
9923 /* The lazy approach for now... */
9924 uint8_t *pu8Dst;
9925 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9926 if (rc == VINF_SUCCESS)
9927 {
9928 *pu8Dst = u8Value;
9929 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9930 }
9931 return rc;
9932}
9933
9934
9935#ifdef IEM_WITH_SETJMP
9936/**
9937 * Stores a data byte, longjmp on error.
9938 *
9939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9940 * @param iSegReg The index of the segment register to use for
9941 * this access. The base and limits are checked.
9942 * @param GCPtrMem The address of the guest memory.
9943 * @param u8Value The value to store.
9944 */
9945IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9946{
9947 /* The lazy approach for now... */
9948 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9949 *pu8Dst = u8Value;
9950 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9951}
9952#endif
9953
9954
9955/**
9956 * Stores a data word.
9957 *
9958 * @returns Strict VBox status code.
9959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9960 * @param iSegReg The index of the segment register to use for
9961 * this access. The base and limits are checked.
9962 * @param GCPtrMem The address of the guest memory.
9963 * @param u16Value The value to store.
9964 */
9965IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9966{
9967 /* The lazy approach for now... */
9968 uint16_t *pu16Dst;
9969 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9970 if (rc == VINF_SUCCESS)
9971 {
9972 *pu16Dst = u16Value;
9973 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9974 }
9975 return rc;
9976}
9977
9978
9979#ifdef IEM_WITH_SETJMP
9980/**
9981 * Stores a data word, longjmp on error.
9982 *
9983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9984 * @param iSegReg The index of the segment register to use for
9985 * this access. The base and limits are checked.
9986 * @param GCPtrMem The address of the guest memory.
9987 * @param u16Value The value to store.
9988 */
9989IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9990{
9991 /* The lazy approach for now... */
9992 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9993 *pu16Dst = u16Value;
9994 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9995}
9996#endif
9997
9998
9999/**
10000 * Stores a data dword.
10001 *
10002 * @returns Strict VBox status code.
10003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10004 * @param iSegReg The index of the segment register to use for
10005 * this access. The base and limits are checked.
10006 * @param GCPtrMem The address of the guest memory.
10007 * @param u32Value The value to store.
10008 */
10009IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10010{
10011 /* The lazy approach for now... */
10012 uint32_t *pu32Dst;
10013 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10014 if (rc == VINF_SUCCESS)
10015 {
10016 *pu32Dst = u32Value;
10017 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10018 }
10019 return rc;
10020}
10021
10022
10023#ifdef IEM_WITH_SETJMP
10024/**
10025 * Stores a data dword, longjmp on error.
10026 *
10028 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10029 * @param iSegReg The index of the segment register to use for
10030 * this access. The base and limits are checked.
10031 * @param GCPtrMem The address of the guest memory.
10032 * @param u32Value The value to store.
10033 */
10034IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10035{
10036 /* The lazy approach for now... */
10037 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10038 *pu32Dst = u32Value;
10039 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10040}
10041#endif
10042
10043
10044/**
10045 * Stores a data qword.
10046 *
10047 * @returns Strict VBox status code.
10048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10049 * @param iSegReg The index of the segment register to use for
10050 * this access. The base and limits are checked.
10051 * @param GCPtrMem The address of the guest memory.
10052 * @param u64Value The value to store.
10053 */
10054IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10055{
10056 /* The lazy approach for now... */
10057 uint64_t *pu64Dst;
10058 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10059 if (rc == VINF_SUCCESS)
10060 {
10061 *pu64Dst = u64Value;
10062 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10063 }
10064 return rc;
10065}
10066
10067
10068#ifdef IEM_WITH_SETJMP
10069/**
10070 * Stores a data qword, longjmp on error.
10071 *
10072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10073 * @param iSegReg The index of the segment register to use for
10074 * this access. The base and limits are checked.
10075 * @param GCPtrMem The address of the guest memory.
10076 * @param u64Value The value to store.
10077 */
10078IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10079{
10080 /* The lazy approach for now... */
10081 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10082 *pu64Dst = u64Value;
10083 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10084}
10085#endif
10086
10087
10088/**
10089 * Stores a data dqword.
10090 *
10091 * @returns Strict VBox status code.
10092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10093 * @param iSegReg The index of the segment register to use for
10094 * this access. The base and limits are checked.
10095 * @param GCPtrMem The address of the guest memory.
10096 * @param u128Value The value to store.
10097 */
10098IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10099{
10100 /* The lazy approach for now... */
10101 PRTUINT128U pu128Dst;
10102 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10103 if (rc == VINF_SUCCESS)
10104 {
10105 pu128Dst->au64[0] = u128Value.au64[0];
10106 pu128Dst->au64[1] = u128Value.au64[1];
10107 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10108 }
10109 return rc;
10110}
10111
10112
10113#ifdef IEM_WITH_SETJMP
10114/**
10115 * Stores a data dqword, longjmp on error.
10116 *
10117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10118 * @param iSegReg The index of the segment register to use for
10119 * this access. The base and limits are checked.
10120 * @param GCPtrMem The address of the guest memory.
10121 * @param u128Value The value to store.
10122 */
10123IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10124{
10125 /* The lazy approach for now... */
10126 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10127 pu128Dst->au64[0] = u128Value.au64[0];
10128 pu128Dst->au64[1] = u128Value.au64[1];
10129 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10130}
10131#endif
10132
10133
10134/**
10135 * Stores a data dqword, SSE aligned.
10136 *
10137 * @returns Strict VBox status code.
10138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10139 * @param iSegReg The index of the segment register to use for
10140 * this access. The base and limits are checked.
10141 * @param GCPtrMem The address of the guest memory.
10142 * @param u128Value The value to store.
10143 */
10144IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10145{
10146 /* The lazy approach for now... */
10147 if ( (GCPtrMem & 15)
10148 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10149 return iemRaiseGeneralProtectionFault0(pVCpu);
10150
10151 PRTUINT128U pu128Dst;
10152 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10153 if (rc == VINF_SUCCESS)
10154 {
10155 pu128Dst->au64[0] = u128Value.au64[0];
10156 pu128Dst->au64[1] = u128Value.au64[1];
10157 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10158 }
10159 return rc;
10160}
10161
10162
10163#ifdef IEM_WITH_SETJMP
10164/**
10165 * Stores a data dqword, SSE aligned, longjmp on error.
10166 *
10168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10169 * @param iSegReg The index of the segment register to use for
10170 * this access. The base and limits are checked.
10171 * @param GCPtrMem The address of the guest memory.
10172 * @param u128Value The value to store.
10173 */
10174DECL_NO_INLINE(IEM_STATIC, void)
10175iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10176{
10177 /* The lazy approach for now... */
10178 if ( (GCPtrMem & 15) == 0
10179 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10180 {
10181 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10182 pu128Dst->au64[0] = u128Value.au64[0];
10183 pu128Dst->au64[1] = u128Value.au64[1];
10184 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10185 return;
10186 }
10187
10188 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10189 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10190}
10191#endif
10192
10193
10194/**
10195 * Stores a data oword (octo word).
10196 *
10197 * @returns Strict VBox status code.
10198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10199 * @param iSegReg The index of the segment register to use for
10200 * this access. The base and limits are checked.
10201 * @param GCPtrMem The address of the guest memory.
10202 * @param pu256Value Pointer to the value to store.
10203 */
10204IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10205{
10206 /* The lazy approach for now... */
10207 PRTUINT256U pu256Dst;
10208 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10209 if (rc == VINF_SUCCESS)
10210 {
10211 pu256Dst->au64[0] = pu256Value->au64[0];
10212 pu256Dst->au64[1] = pu256Value->au64[1];
10213 pu256Dst->au64[2] = pu256Value->au64[2];
10214 pu256Dst->au64[3] = pu256Value->au64[3];
10215 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10216 }
10217 return rc;
10218}
10219
10220
10221#ifdef IEM_WITH_SETJMP
10222/**
10223 * Stores a data oword (octo word), longjmp on error.
10224 *
10225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10226 * @param iSegReg The index of the segment register to use for
10227 * this access. The base and limits are checked.
10228 * @param GCPtrMem The address of the guest memory.
10229 * @param pu256Value Pointer to the value to store.
10230 */
10231IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10232{
10233 /* The lazy approach for now... */
10234 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10235 pu256Dst->au64[0] = pu256Value->au64[0];
10236 pu256Dst->au64[1] = pu256Value->au64[1];
10237 pu256Dst->au64[2] = pu256Value->au64[2];
10238 pu256Dst->au64[3] = pu256Value->au64[3];
10239 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10240}
10241#endif
10242
10243
10244/**
10245 * Stores a data oword (octo word), AVX aligned.
10246 *
10247 * @returns Strict VBox status code.
10248 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10249 * @param iSegReg The index of the segment register to use for
10250 * this access. The base and limits are checked.
10251 * @param GCPtrMem The address of the guest memory.
10252 * @param pu256Value Pointer to the value to store.
10253 */
10254IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10255{
10256 /* The lazy approach for now... */
10257 if (GCPtrMem & 31)
10258 return iemRaiseGeneralProtectionFault0(pVCpu);
10259
10260 PRTUINT256U pu256Dst;
10261 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10262 if (rc == VINF_SUCCESS)
10263 {
10264 pu256Dst->au64[0] = pu256Value->au64[0];
10265 pu256Dst->au64[1] = pu256Value->au64[1];
10266 pu256Dst->au64[2] = pu256Value->au64[2];
10267 pu256Dst->au64[3] = pu256Value->au64[3];
10268 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10269 }
10270 return rc;
10271}
10272
10273
10274#ifdef IEM_WITH_SETJMP
10275/**
10276 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10277 *
10279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10280 * @param iSegReg The index of the segment register to use for
10281 * this access. The base and limits are checked.
10282 * @param GCPtrMem The address of the guest memory.
10283 * @param pu256Value Pointer to the value to store.
10284 */
10285DECL_NO_INLINE(IEM_STATIC, void)
10286iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10287{
10288 /* The lazy approach for now... */
10289 if ((GCPtrMem & 31) == 0)
10290 {
10291 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10292 pu256Dst->au64[0] = pu256Value->au64[0];
10293 pu256Dst->au64[1] = pu256Value->au64[1];
10294 pu256Dst->au64[2] = pu256Value->au64[2];
10295 pu256Dst->au64[3] = pu256Value->au64[3];
10296 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10297 return;
10298 }
10299
10300 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10301 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10302}
10303#endif
10304
10305
10306/**
10307 * Stores a descriptor register (sgdt, sidt).
10308 *
10309 * @returns Strict VBox status code.
10310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10311 * @param cbLimit The limit.
10312 * @param GCPtrBase The base address.
10313 * @param iSegReg The index of the segment register to use for
10314 * this access. The base and limits are checked.
10315 * @param GCPtrMem The address of the guest memory.
10316 */
10317IEM_STATIC VBOXSTRICTRC
10318iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10319{
10320 VBOXSTRICTRC rcStrict;
10321 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
10322 {
10323 Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n"));
10324 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
10325 }
10326
10327 /*
10328 * The SIDT and SGDT instructions actually stores the data using two
10329 * independent writes. The instructions does not respond to opsize prefixes.
10330 */
10331 rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10332 if (rcStrict == VINF_SUCCESS)
10333 {
10334 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10335 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10336 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10337 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10338 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10339 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10340 else
10341 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10342 }
10343 return rcStrict;
10344}
10345
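/*
 * A worked example of the 16-bit operand size store above (illustrative values):
 * sgdt/sidt in 16-bit code writes the 16-bit limit followed by the base dword;
 * with base 0x00123456 a 286-or-older target stores 0xff123456 (top byte forced
 * to 0xff), later CPUs store 0x00123456, and 32-bit/64-bit code stores the full
 * 32-bit/64-bit base respectively.
 */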
10346
10347/**
10348 * Pushes a word onto the stack.
10349 *
10350 * @returns Strict VBox status code.
10351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10352 * @param u16Value The value to push.
10353 */
10354IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10355{
10356 /* Decrement the stack pointer. */
10357 uint64_t uNewRsp;
10358 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10359 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10360
10361 /* Write the word the lazy way. */
10362 uint16_t *pu16Dst;
10363 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10364 if (rc == VINF_SUCCESS)
10365 {
10366 *pu16Dst = u16Value;
10367 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10368 }
10369
10370 /* Commit the new RSP value unless an access handler made trouble. */
10371 if (rc == VINF_SUCCESS)
10372 pCtx->rsp = uNewRsp;
10373
10374 return rc;
10375}
10376
10377
10378/**
10379 * Pushes a dword onto the stack.
10380 *
10381 * @returns Strict VBox status code.
10382 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10383 * @param u32Value The value to push.
10384 */
10385IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10386{
10387 /* Decrement the stack pointer. */
10388 uint64_t uNewRsp;
10389 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10390 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10391
10392 /* Write the dword the lazy way. */
10393 uint32_t *pu32Dst;
10394 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10395 if (rc == VINF_SUCCESS)
10396 {
10397 *pu32Dst = u32Value;
10398 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10399 }
10400
10401 /* Commit the new RSP value unless an access handler made trouble. */
10402 if (rc == VINF_SUCCESS)
10403 pCtx->rsp = uNewRsp;
10404
10405 return rc;
10406}
10407
10408
10409/**
10410 * Pushes a dword segment register value onto the stack.
10411 *
10412 * @returns Strict VBox status code.
10413 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10414 * @param u32Value The value to push.
10415 */
10416IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10417{
10418 /* Decrement the stack pointer. */
10419 uint64_t uNewRsp;
10420 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10421 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10422
10423 VBOXSTRICTRC rc;
10424 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10425 {
10426 /* The recompiler writes a full dword. */
10427 uint32_t *pu32Dst;
10428 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10429 if (rc == VINF_SUCCESS)
10430 {
10431 *pu32Dst = u32Value;
10432 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10433 }
10434 }
10435 else
10436 {
10437 /* The Intel docs talk about zero extending the selector register
10438 value. My actual Intel CPU here might be zero extending the value,
10439 but it still only writes the lower word... */
10440 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10441 * happens when crossing an electric page boundary, is the high word checked
10442 * for write accessibility or not? Probably it is. What about segment limits?
10443 * It appears this behavior is also shared with trap error codes.
10444 *
10445 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10446 * ancient hardware when it actually did change. */
10447 uint16_t *pu16Dst;
10448 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10449 if (rc == VINF_SUCCESS)
10450 {
10451 *pu16Dst = (uint16_t)u32Value;
10452 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10453 }
10454 }
10455
10456 /* Commit the new RSP value unless an access handler made trouble. */
10457 if (rc == VINF_SUCCESS)
10458 pCtx->rsp = uNewRsp;
10459
10460 return rc;
10461}
10462
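/*
 * Illustration of the selector push above (illustrative values): pushing
 * CS=0x0008 in 32-bit code moves RSP down by 4, but outside the verification
 * mode only the low word of the slot is rewritten, so a stack slot that
 * previously held 0xdeadbeef ends up as 0xdead0008 - matching the observed
 * hardware behaviour described in the todo above.
 */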
10463
10464/**
10465 * Pushes a qword onto the stack.
10466 *
10467 * @returns Strict VBox status code.
10468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10469 * @param u64Value The value to push.
10470 */
10471IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10472{
10473 /* Decrement the stack pointer. */
10474 uint64_t uNewRsp;
10475 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10476 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10477
10478 /* Write the qword the lazy way. */
10479 uint64_t *pu64Dst;
10480 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10481 if (rc == VINF_SUCCESS)
10482 {
10483 *pu64Dst = u64Value;
10484 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10485 }
10486
10487 /* Commit the new RSP value unless an access handler made trouble. */
10488 if (rc == VINF_SUCCESS)
10489 pCtx->rsp = uNewRsp;
10490
10491 return rc;
10492}
10493
10494
10495/**
10496 * Pops a word from the stack.
10497 *
10498 * @returns Strict VBox status code.
10499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10500 * @param pu16Value Where to store the popped value.
10501 */
10502IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10503{
10504 /* Increment the stack pointer. */
10505 uint64_t uNewRsp;
10506 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10507 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10508
10509 /* Read the word the lazy way. */
10510 uint16_t const *pu16Src;
10511 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10512 if (rc == VINF_SUCCESS)
10513 {
10514 *pu16Value = *pu16Src;
10515 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10516
10517 /* Commit the new RSP value. */
10518 if (rc == VINF_SUCCESS)
10519 pCtx->rsp = uNewRsp;
10520 }
10521
10522 return rc;
10523}
10524
10525
10526/**
10527 * Pops a dword from the stack.
10528 *
10529 * @returns Strict VBox status code.
10530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10531 * @param pu32Value Where to store the popped value.
10532 */
10533IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10534{
10535 /* Increment the stack pointer. */
10536 uint64_t uNewRsp;
10537 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10538 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10539
10540 /* Read the dword the lazy way. */
10541 uint32_t const *pu32Src;
10542 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10543 if (rc == VINF_SUCCESS)
10544 {
10545 *pu32Value = *pu32Src;
10546 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10547
10548 /* Commit the new RSP value. */
10549 if (rc == VINF_SUCCESS)
10550 pCtx->rsp = uNewRsp;
10551 }
10552
10553 return rc;
10554}
10555
10556
10557/**
10558 * Pops a qword from the stack.
10559 *
10560 * @returns Strict VBox status code.
10561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10562 * @param pu64Value Where to store the popped value.
10563 */
10564IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10565{
10566 /* Increment the stack pointer. */
10567 uint64_t uNewRsp;
10568 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10569 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10570
10571 /* Read the qword the lazy way. */
10572 uint64_t const *pu64Src;
10573 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10574 if (rc == VINF_SUCCESS)
10575 {
10576 *pu64Value = *pu64Src;
10577 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10578
10579 /* Commit the new RSP value. */
10580 if (rc == VINF_SUCCESS)
10581 pCtx->rsp = uNewRsp;
10582 }
10583
10584 return rc;
10585}
10586
10587
10588/**
10589 * Pushes a word onto the stack, using a temporary stack pointer.
10590 *
10591 * @returns Strict VBox status code.
10592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10593 * @param u16Value The value to push.
10594 * @param pTmpRsp Pointer to the temporary stack pointer.
10595 */
10596IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10597{
10598 /* Decrement the stack pointer. */
10599 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10600 RTUINT64U NewRsp = *pTmpRsp;
10601 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10602
10603 /* Write the word the lazy way. */
10604 uint16_t *pu16Dst;
10605 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10606 if (rc == VINF_SUCCESS)
10607 {
10608 *pu16Dst = u16Value;
10609 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10610 }
10611
10612 /* Commit the new RSP value unless an access handler made trouble. */
10613 if (rc == VINF_SUCCESS)
10614 *pTmpRsp = NewRsp;
10615
10616 return rc;
10617}
10618
10619
10620/**
10621 * Pushes a dword onto the stack, using a temporary stack pointer.
10622 *
10623 * @returns Strict VBox status code.
10624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10625 * @param u32Value The value to push.
10626 * @param pTmpRsp Pointer to the temporary stack pointer.
10627 */
10628IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10629{
10630 /* Decrement the stack pointer. */
10631 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10632 RTUINT64U NewRsp = *pTmpRsp;
10633 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10634
10635 /* Write the dword the lazy way. */
10636 uint32_t *pu32Dst;
10637 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10638 if (rc == VINF_SUCCESS)
10639 {
10640 *pu32Dst = u32Value;
10641 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10642 }
10643
10644 /* Commit the new RSP value unless an access handler made trouble. */
10645 if (rc == VINF_SUCCESS)
10646 *pTmpRsp = NewRsp;
10647
10648 return rc;
10649}
10650
10651
10652/**
10653 * Pushes a dword onto the stack, using a temporary stack pointer.
10654 *
10655 * @returns Strict VBox status code.
10656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10657 * @param u64Value The value to push.
10658 * @param pTmpRsp Pointer to the temporary stack pointer.
10659 */
10660IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10661{
10662 /* Decrement the stack pointer. */
10663 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10664 RTUINT64U NewRsp = *pTmpRsp;
10665 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10666
10667 /* Write the qword the lazy way. */
10668 uint64_t *pu64Dst;
10669 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10670 if (rc == VINF_SUCCESS)
10671 {
10672 *pu64Dst = u64Value;
10673 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10674 }
10675
10676 /* Commit the new RSP value unless an access handler made trouble. */
10677 if (rc == VINF_SUCCESS)
10678 *pTmpRsp = NewRsp;
10679
10680 return rc;
10681}
10682
10683
10684/**
10685 * Pops a word from the stack, using a temporary stack pointer.
10686 *
10687 * @returns Strict VBox status code.
10688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10689 * @param pu16Value Where to store the popped value.
10690 * @param pTmpRsp Pointer to the temporary stack pointer.
10691 */
10692IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10693{
10694 /* Increment the stack pointer. */
10695 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10696 RTUINT64U NewRsp = *pTmpRsp;
10697 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10698
10699 /* Read the word the lazy way. */
10700 uint16_t const *pu16Src;
10701 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10702 if (rc == VINF_SUCCESS)
10703 {
10704 *pu16Value = *pu16Src;
10705 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10706
10707 /* Commit the new RSP value. */
10708 if (rc == VINF_SUCCESS)
10709 *pTmpRsp = NewRsp;
10710 }
10711
10712 return rc;
10713}
10714
10715
10716/**
10717 * Pops a dword from the stack, using a temporary stack pointer.
10718 *
10719 * @returns Strict VBox status code.
10720 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10721 * @param pu32Value Where to store the popped value.
10722 * @param pTmpRsp Pointer to the temporary stack pointer.
10723 */
10724IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10725{
10726 /* Increment the stack pointer. */
10727 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10728 RTUINT64U NewRsp = *pTmpRsp;
10729 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10730
10731 /* Read the dword the lazy way. */
10732 uint32_t const *pu32Src;
10733 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10734 if (rc == VINF_SUCCESS)
10735 {
10736 *pu32Value = *pu32Src;
10737 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10738
10739 /* Commit the new RSP value. */
10740 if (rc == VINF_SUCCESS)
10741 *pTmpRsp = NewRsp;
10742 }
10743
10744 return rc;
10745}
10746
10747
10748/**
10749 * Pops a qword from the stack, using a temporary stack pointer.
10750 *
10751 * @returns Strict VBox status code.
10752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10753 * @param pu64Value Where to store the popped value.
10754 * @param pTmpRsp Pointer to the temporary stack pointer.
10755 */
10756IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10757{
10758 /* Increment the stack pointer. */
10759 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10760 RTUINT64U NewRsp = *pTmpRsp;
10761 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10762
10763 /* Read the qword the lazy way. */
10764 uint64_t const *pu64Src;
10765 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10766 if (rcStrict == VINF_SUCCESS)
10767 {
10768 *pu64Value = *pu64Src;
10769 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10770
10771 /* Commit the new RSP value. */
10772 if (rcStrict == VINF_SUCCESS)
10773 *pTmpRsp = NewRsp;
10774 }
10775
10776 return rcStrict;
10777}
10778
10779
10780/**
10781 * Begin a special stack push (used by interrupts, exceptions and such).
10782 *
10783 * This will raise \#SS or \#PF if appropriate.
10784 *
10785 * @returns Strict VBox status code.
10786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10787 * @param cbMem The number of bytes to push onto the stack.
10788 * @param ppvMem Where to return the pointer to the stack memory.
10789 * As with the other memory functions this could be
10790 * direct access or bounce buffered access, so
10791 * don't commit any registers until the commit call
10792 * succeeds.
10793 * @param puNewRsp Where to return the new RSP value. This must be
10794 * passed unchanged to
10795 * iemMemStackPushCommitSpecial().
10796 */
10797IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10798{
10799 Assert(cbMem < UINT8_MAX);
10800 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10801 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10802 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10803}
10804
10805
10806/**
10807 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10808 *
10809 * This will update the rSP.
10810 *
10811 * @returns Strict VBox status code.
10812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10813 * @param pvMem The pointer returned by
10814 * iemMemStackPushBeginSpecial().
10815 * @param uNewRsp The new RSP value returned by
10816 * iemMemStackPushBeginSpecial().
10817 */
10818IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10819{
10820 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10821 if (rcStrict == VINF_SUCCESS)
10822 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10823 return rcStrict;
10824}
10825
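/*
 * A minimal sketch of the special push protocol above (hypothetical caller and
 * value, not part of the original source): begin, fill the returned memory,
 * then commit, which also updates RSP on success.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC iemExamplePushErrorCode(PVMCPU pVCpu, uint32_t uErrCode)
{
    void        *pvStack;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uint32_t), &pvStack, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *(uint32_t *)pvStack = uErrCode;    /* May be direct or bounce buffered memory. */
    return iemMemStackPushCommitSpecial(pVCpu, pvStack, uNewRsp);
}
#endif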
10826
10827/**
10828 * Begin a special stack pop (used by iret, retf and such).
10829 *
10830 * This will raise \#SS or \#PF if appropriate.
10831 *
10832 * @returns Strict VBox status code.
10833 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10834 * @param cbMem The number of bytes to pop from the stack.
10835 * @param ppvMem Where to return the pointer to the stack memory.
10836 * @param puNewRsp Where to return the new RSP value. This must be
10837 * assigned to CPUMCTX::rsp manually some time
10838 * after iemMemStackPopDoneSpecial() has been
10839 * called.
10840 */
10841IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10842{
10843 Assert(cbMem < UINT8_MAX);
10844 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10845 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10846 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10847}
10848
10849
10850/**
10851 * Continue a special stack pop (used by iret and retf).
10852 *
10853 * This will raise \#SS or \#PF if appropriate.
10854 *
10855 * @returns Strict VBox status code.
10856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10857 * @param cbMem The number of bytes to pop from the stack.
10858 * @param ppvMem Where to return the pointer to the stack memory.
10859 * @param puNewRsp Where to return the new RSP value. This must be
10860 * assigned to CPUMCTX::rsp manually some time
10861 * after iemMemStackPopDoneSpecial() has been
10862 * called.
10863 */
10864IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10865{
10866 Assert(cbMem < UINT8_MAX);
10867 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10868 RTUINT64U NewRsp;
10869 NewRsp.u = *puNewRsp;
10870 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10871 *puNewRsp = NewRsp.u;
10872 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10873}
10874
10875
10876/**
10877 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10878 * iemMemStackPopContinueSpecial).
10879 *
10880 * The caller will manually commit the rSP.
10881 *
10882 * @returns Strict VBox status code.
10883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10884 * @param pvMem The pointer returned by
10885 * iemMemStackPopBeginSpecial() or
10886 * iemMemStackPopContinueSpecial().
10887 */
10888IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10889{
10890 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10891}
10892
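/*
 * A minimal sketch of the special pop protocol above (hypothetical caller, not
 * part of the original source): begin, read the mapped memory, signal done, and
 * only then commit RSP manually as the function documentation requires.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC iemExamplePopQword(PVMCPU pVCpu, uint64_t *puValue)
{
    void const  *pvStack;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(uint64_t), &pvStack, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *puValue = *(uint64_t const *)pvStack;
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvStack);
    if (rcStrict == VINF_SUCCESS)
        IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
    return rcStrict;
}
#endif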
10893
10894/**
10895 * Fetches a system table byte.
10896 *
10897 * @returns Strict VBox status code.
10898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10899 * @param pbDst Where to return the byte.
10900 * @param iSegReg The index of the segment register to use for
10901 * this access. The base and limits are checked.
10902 * @param GCPtrMem The address of the guest memory.
10903 */
10904IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10905{
10906 /* The lazy approach for now... */
10907 uint8_t const *pbSrc;
10908 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10909 if (rc == VINF_SUCCESS)
10910 {
10911 *pbDst = *pbSrc;
10912 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10913 }
10914 return rc;
10915}
10916
10917
10918/**
10919 * Fetches a system table word.
10920 *
10921 * @returns Strict VBox status code.
10922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10923 * @param pu16Dst Where to return the word.
10924 * @param iSegReg The index of the segment register to use for
10925 * this access. The base and limits are checked.
10926 * @param GCPtrMem The address of the guest memory.
10927 */
10928IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10929{
10930 /* The lazy approach for now... */
10931 uint16_t const *pu16Src;
10932 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10933 if (rc == VINF_SUCCESS)
10934 {
10935 *pu16Dst = *pu16Src;
10936 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10937 }
10938 return rc;
10939}
10940
10941
10942/**
10943 * Fetches a system table dword.
10944 *
10945 * @returns Strict VBox status code.
10946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10947 * @param pu32Dst Where to return the dword.
10948 * @param iSegReg The index of the segment register to use for
10949 * this access. The base and limits are checked.
10950 * @param GCPtrMem The address of the guest memory.
10951 */
10952IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10953{
10954 /* The lazy approach for now... */
10955 uint32_t const *pu32Src;
10956 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10957 if (rc == VINF_SUCCESS)
10958 {
10959 *pu32Dst = *pu32Src;
10960 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10961 }
10962 return rc;
10963}
10964
10965
10966/**
10967 * Fetches a system table qword.
10968 *
10969 * @returns Strict VBox status code.
10970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10971 * @param pu64Dst Where to return the qword.
10972 * @param iSegReg The index of the segment register to use for
10973 * this access. The base and limits are checked.
10974 * @param GCPtrMem The address of the guest memory.
10975 */
10976IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10977{
10978 /* The lazy approach for now... */
10979 uint64_t const *pu64Src;
10980 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10981 if (rc == VINF_SUCCESS)
10982 {
10983 *pu64Dst = *pu64Src;
10984 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10985 }
10986 return rc;
10987}
10988
10989
10990/**
10991 * Fetches a descriptor table entry with caller specified error code.
10992 *
10993 * @returns Strict VBox status code.
10994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10995 * @param pDesc Where to return the descriptor table entry.
10996 * @param uSel The selector which table entry to fetch.
10997 * @param uXcpt The exception to raise on table lookup error.
10998 * @param uErrorCode The error code associated with the exception.
10999 */
11000IEM_STATIC VBOXSTRICTRC
11001iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
11002{
11003 AssertPtr(pDesc);
11004 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11005
11006 /** @todo did the 286 require all 8 bytes to be accessible? */
11007 /*
11008 * Get the selector table base and check bounds.
11009 */
11010 RTGCPTR GCPtrBase;
11011 if (uSel & X86_SEL_LDT)
11012 {
11013 if ( !pCtx->ldtr.Attr.n.u1Present
11014 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
11015 {
11016 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
11017 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
11018 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11019 uErrorCode, 0);
11020 }
11021
11022 Assert(pCtx->ldtr.Attr.n.u1Present);
11023 GCPtrBase = pCtx->ldtr.u64Base;
11024 }
11025 else
11026 {
11027 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
11028 {
11029 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
11030 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11031 uErrorCode, 0);
11032 }
11033 GCPtrBase = pCtx->gdtr.pGdt;
11034 }
11035
11036 /*
11037 * Read the legacy descriptor and maybe the long mode extensions if
11038 * required.
11039 */
11040 VBOXSTRICTRC rcStrict;
11041 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11042 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11043 else
11044 {
11045 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11046 if (rcStrict == VINF_SUCCESS)
11047 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11048 if (rcStrict == VINF_SUCCESS)
11049 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11050 if (rcStrict == VINF_SUCCESS)
11051 pDesc->Legacy.au16[3] = 0;
11052 else
11053 return rcStrict;
11054 }
11055
11056 if (rcStrict == VINF_SUCCESS)
11057 {
11058 if ( !IEM_IS_LONG_MODE(pVCpu)
11059 || pDesc->Legacy.Gen.u1DescType)
11060 pDesc->Long.au64[1] = 0;
11061 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
11062 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11063 else
11064 {
11065 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11066 /** @todo is this the right exception? */
11067 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11068 }
11069 }
11070 return rcStrict;
11071}
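
/*
 * Worked example for the selector offset arithmetic above (illustration only).
 * For uSel = 0x002b, i.e. index 5, TI = 0 (GDT) and RPL = 3:
 *      uSel & X86_SEL_MASK          = 0x0028 = 5 * 8, the offset of the 8-byte
 *                                     legacy descriptor within the GDT;
 *      (uSel | X86_SEL_RPL_LDT) + 1 = 0x002f + 1 = 0x0030 = 0x0028 + 8, the
 *                                     offset of the high 8 bytes of a 16-byte
 *                                     long mode system descriptor.
 * The bounds check above therefore requires (uSel | X86_SEL_RPL_LDT) + 8, the
 * offset of the last byte of such a 16-byte descriptor, to be within the table
 * limit before the high half is fetched.
 */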
11072
11073
11074/**
11075 * Fetches a descriptor table entry.
11076 *
11077 * @returns Strict VBox status code.
11078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11079 * @param pDesc Where to return the descriptor table entry.
11080 * @param uSel The selector which table entry to fetch.
11081 * @param uXcpt The exception to raise on table lookup error.
11082 */
11083IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11084{
11085 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11086}
11087
11088
11089/**
11090 * Fakes a long mode stack selector for SS = 0.
11091 *
11092 * @param pDescSs Where to return the fake stack descriptor.
11093 * @param uDpl The DPL we want.
11094 */
11095IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11096{
11097 pDescSs->Long.au64[0] = 0;
11098 pDescSs->Long.au64[1] = 0;
11099 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11100 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11101 pDescSs->Long.Gen.u2Dpl = uDpl;
11102 pDescSs->Long.Gen.u1Present = 1;
11103 pDescSs->Long.Gen.u1Long = 1;
11104}
11105
11106
11107/**
11108 * Marks the selector descriptor as accessed (only non-system descriptors).
11109 *
11110 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11111 * will therefore skip the limit checks.
11112 *
11113 * @returns Strict VBox status code.
11114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11115 * @param uSel The selector.
11116 */
11117IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11118{
11119 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11120
11121 /*
11122 * Get the selector table base and calculate the entry address.
11123 */
11124 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11125 ? pCtx->ldtr.u64Base
11126 : pCtx->gdtr.pGdt;
11127 GCPtr += uSel & X86_SEL_MASK;
11128
11129 /*
11130     * ASMAtomicBitSet will assert if the address is misaligned, so do some
11131     * ugly stuff to avoid this.  This makes sure the access is atomic and
11132     * more or less removes any question about 8-bit vs 32-bit accesses.
11133 */
11134 VBOXSTRICTRC rcStrict;
11135 uint32_t volatile *pu32;
11136 if ((GCPtr & 3) == 0)
11137 {
11138        /* The normal case, map the 32 bits around the accessed bit (40). */
11139 GCPtr += 2 + 2;
11140 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11141 if (rcStrict != VINF_SUCCESS)
11142 return rcStrict;
11143        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11144 }
11145 else
11146 {
11147 /* The misaligned GDT/LDT case, map the whole thing. */
11148 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11149 if (rcStrict != VINF_SUCCESS)
11150 return rcStrict;
11151 switch ((uintptr_t)pu32 & 3)
11152 {
11153 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11154 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11155 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11156 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11157 }
11158 }
11159
11160 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11161}
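
/*
 * Illustrative sketch only: the descriptor helpers above are typically combined
 * along these lines when loading a segment register (the real call sites live
 * in IEMAllCImpl.cpp.h and differ in detail; the not-present helper named here
 * is an assumption made for the example).
 *
 *      IEMSELDESC Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ... type, DPL/RPL/CPL checks ...
 *      if (!Desc.Legacy.Gen.u1Present)
 *          return iemRaiseSelectorNotPresentBySelector(pVCpu, uSel); // assumed helper
 *      if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *      {
 *          rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;
 *          Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
 *      }
 */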
11162
11163/** @} */
11164
11165
11166/*
11167 * Include the C/C++ implementation of instruction.
11168 */
11169#include "IEMAllCImpl.cpp.h"
11170
11171
11172
11173/** @name "Microcode" macros.
11174 *
11175 * The idea is that we should be able to use the same code both to interpret
11176 * instructions and to feed a recompiler.  Thus this obfuscation.
11177 *
11178 * @{
11179 */
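
/*
 * Illustrative sketch only, not taken verbatim from the instruction templates:
 * a decoded instruction body built from the IEM_MC_* macros below looks roughly
 * like this (a register-form 16-bit push).  When interpreting, it simply
 * expands to the plain C given by the definitions that follow; a recompiler
 * could redefine the same macros to emit code instead.
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *      IEM_MC_PUSH_U16(u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *      return VINF_SUCCESS;
 */
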
11180#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11181#define IEM_MC_END() }
11182#define IEM_MC_PAUSE() do {} while (0)
11183#define IEM_MC_CONTINUE() do {} while (0)
11184
11185/** Internal macro. */
11186#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11187 do \
11188 { \
11189 VBOXSTRICTRC rcStrict2 = a_Expr; \
11190 if (rcStrict2 != VINF_SUCCESS) \
11191 return rcStrict2; \
11192 } while (0)
11193
11194
11195#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11196#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11197#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11198#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11199#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11200#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11201#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11202#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11203#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11204 do { \
11205 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11206 return iemRaiseDeviceNotAvailable(pVCpu); \
11207 } while (0)
11208#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11209 do { \
11210 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11211 return iemRaiseDeviceNotAvailable(pVCpu); \
11212 } while (0)
11213#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11214 do { \
11215 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11216 return iemRaiseMathFault(pVCpu); \
11217 } while (0)
11218#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11219 do { \
11220 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11221 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11222 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11223 return iemRaiseUndefinedOpcode(pVCpu); \
11224 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11225 return iemRaiseDeviceNotAvailable(pVCpu); \
11226 } while (0)
11227#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11228 do { \
11229 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11230 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11231 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11232 return iemRaiseUndefinedOpcode(pVCpu); \
11233 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11234 return iemRaiseDeviceNotAvailable(pVCpu); \
11235 } while (0)
11236#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11237 do { \
11238 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11239 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11240 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11241 return iemRaiseUndefinedOpcode(pVCpu); \
11242 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11243 return iemRaiseDeviceNotAvailable(pVCpu); \
11244 } while (0)
11245#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11246 do { \
11247 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11248 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11249 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11250 return iemRaiseUndefinedOpcode(pVCpu); \
11251 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11252 return iemRaiseDeviceNotAvailable(pVCpu); \
11253 } while (0)
11254#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11255 do { \
11256 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11257 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11258 return iemRaiseUndefinedOpcode(pVCpu); \
11259 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11260 return iemRaiseDeviceNotAvailable(pVCpu); \
11261 } while (0)
11262#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11263 do { \
11264 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11265 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11266 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11267 return iemRaiseUndefinedOpcode(pVCpu); \
11268 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11269 return iemRaiseDeviceNotAvailable(pVCpu); \
11270 } while (0)
11271#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11272 do { \
11273 if (pVCpu->iem.s.uCpl != 0) \
11274 return iemRaiseGeneralProtectionFault0(pVCpu); \
11275 } while (0)
11276#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11277 do { \
11278 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11279 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11280 } while (0)
11281
11282
11283#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11284#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11285#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11286#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11287#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11288#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11289#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11290 uint32_t a_Name; \
11291 uint32_t *a_pName = &a_Name
11292#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11293 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
11294
11295#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11296#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11297
11298#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11299#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11300#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11301#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11302#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11303#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11304#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11305#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11306#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11307#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11308#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11309#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11310#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11311#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11312#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11313#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11314#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11315#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11316#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11317#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11318#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11319#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11320#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11321#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11322#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11323#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11324#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11325#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11326#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11327/** @note Not for IOPL or IF testing or modification. */
11328#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11329#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11330#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11331#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11332
11333#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11334#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11335#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11336#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11337#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11338#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11339#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11340#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11341#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11342#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11343#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11344 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11345
11346#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11347#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11348/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11349 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11350#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11351#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11352/** @note Not for IOPL or IF testing or modification. */
11353#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11354
11355#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11356#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11357#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11358 do { \
11359 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11360 *pu32Reg += (a_u32Value); \
11361        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11362 } while (0)
11363#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11364
11365#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11366#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11367#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11368 do { \
11369 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11370 *pu32Reg -= (a_u32Value); \
11371        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11372 } while (0)
11373#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11374#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11375
11376#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11377#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11378#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11379#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11380#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11381#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11382#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11383
11384#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11385#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11386#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11387#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11388
11389#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11390#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11391#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11392
11393#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11394#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11395#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11396
11397#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11398#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11399#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11400
11401#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11402#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11403#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11404
11405#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11406
11407#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11408
11409#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11410#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11411#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11412 do { \
11413 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11414 *pu32Reg &= (a_u32Value); \
11415        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11416 } while (0)
11417#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11418
11419#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11420#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11421#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11422 do { \
11423 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11424 *pu32Reg |= (a_u32Value); \
11425        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11426 } while (0)
11427#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11428
11429
11430/** @note Not for IOPL or IF modification. */
11431#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11432/** @note Not for IOPL or IF modification. */
11433#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11434/** @note Not for IOPL or IF modification. */
11435#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11436
11437#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11438
11439/** Switches the FPU state to MMX mode (FSW.TOS=0, all tags valid, i.e. abridged FTW=0xff) if necessary. */
11440#define IEM_MC_FPU_TO_MMX_MODE() do { \
11441 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11442 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11443 } while (0)
11444
11445#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11446 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11447#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11448 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11449#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11450 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11451 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11452 } while (0)
11453#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11454 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11455 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11456 } while (0)
11457#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11458 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11459#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11460 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11461#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11462 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11463
11464#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11465 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11466 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11467 } while (0)
11468#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11469 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11470#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11471 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11472#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11473 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11474#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11475 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11476 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11477 } while (0)
11478#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11479 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11480#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11481 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11482 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11483 } while (0)
11484#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11485 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11486#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11487 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11488 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11489 } while (0)
11490#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11491 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11492#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11493 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11494#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11495 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11496#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11497 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11498#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11499 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11500 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11501 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11502 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11503 } while (0)
11504
11505#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11506 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11507 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11508 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11509 } while (0)
11510#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11511 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11512 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11513 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11514 } while (0)
11515#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11516 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11517 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11518 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11519 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11520 } while (0)
11521#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11522 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11523 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11524 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11525 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11526 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11527 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11528 } while (0)
11529
11530#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11531#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11532 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11533 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11534 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11535 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11536 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11537 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11538 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11539 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11540 } while (0)
11541#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11542 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11543 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11544 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11545 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11546 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11547 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11548 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11549 } while (0)
11550#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11551 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11552 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11553 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11554 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11555 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11556 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11557 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11558 } while (0)
11559#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11560 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11561 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11562 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11563 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11564 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11565 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11566 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11567 } while (0)
11568
11569#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11570 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11571#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11572 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11573#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11574 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11575#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11576 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11577 uintptr_t const iYRegTmp = (a_iYReg); \
11578 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11579 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11580 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11581 } while (0)
11582
11583#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11584 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11585 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11586 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11587 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11588 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11589 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11590 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11591 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11592 } while (0)
11593#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11594 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11595 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11596 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11597 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11598 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11599 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11600 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11601 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11602 } while (0)
11603
11604#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11605 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11606 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11607 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11608 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11609 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11610 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11611 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11612 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11613 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11614 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11615 } while (0)
11616#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11617 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11618 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11619 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11620 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11621 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11622 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11623 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11624 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11625 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11626 } while (0)
11627#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11628 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11629 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11630 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11631 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11632 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11633 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11634 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11635 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11636 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11637 } while (0)
11638#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11639 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11640 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11641 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11642 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11643 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11644 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11645 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11646 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11647 } while (0)
11648
11649#ifndef IEM_WITH_SETJMP
11650# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11651 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11652# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11653 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11654# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11655 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11656#else
11657# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11658 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11659# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11660 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11661# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11662 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11663#endif
11664
11665#ifndef IEM_WITH_SETJMP
11666# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11667 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11668# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11669 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11670# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11671 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11672#else
11673# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11674 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11675# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11676 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11677# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11678 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11679#endif
11680
11681#ifndef IEM_WITH_SETJMP
11682# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11683 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11684# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11685 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11686# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11687 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11688#else
11689# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11690 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11691# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11692 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11693# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11694 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11695#endif
11696
11697#ifdef SOME_UNUSED_FUNCTION
11698# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11699 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11700#endif
11701
11702#ifndef IEM_WITH_SETJMP
11703# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11704 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11705# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11706 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11707# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11708 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11709# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11710 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11711#else
11712# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11713 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11714# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11715 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11716# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11717 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11718# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11719 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11720#endif
11721
11722#ifndef IEM_WITH_SETJMP
11723# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11724 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11725# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11726 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11727# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11728 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11729#else
11730# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11731 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11732# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11733 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11734# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11735 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11736#endif
11737
11738#ifndef IEM_WITH_SETJMP
11739# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11740 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11741# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11742 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11743#else
11744# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11745 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11746# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11747 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11748#endif
11749
11750#ifndef IEM_WITH_SETJMP
11751# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11752 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11753# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11754 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11755#else
11756# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11757 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11758# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11759 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11760#endif
11761
11762
11763
11764#ifndef IEM_WITH_SETJMP
11765# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11766 do { \
11767 uint8_t u8Tmp; \
11768 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11769 (a_u16Dst) = u8Tmp; \
11770 } while (0)
11771# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11772 do { \
11773 uint8_t u8Tmp; \
11774 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11775 (a_u32Dst) = u8Tmp; \
11776 } while (0)
11777# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11778 do { \
11779 uint8_t u8Tmp; \
11780 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11781 (a_u64Dst) = u8Tmp; \
11782 } while (0)
11783# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11784 do { \
11785 uint16_t u16Tmp; \
11786 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11787 (a_u32Dst) = u16Tmp; \
11788 } while (0)
11789# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11790 do { \
11791 uint16_t u16Tmp; \
11792 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11793 (a_u64Dst) = u16Tmp; \
11794 } while (0)
11795# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11796 do { \
11797 uint32_t u32Tmp; \
11798 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11799 (a_u64Dst) = u32Tmp; \
11800 } while (0)
11801#else /* IEM_WITH_SETJMP */
11802# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11803 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11804# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11805 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11806# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11807 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11808# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11809 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11810# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11811 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11812# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11813 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11814#endif /* IEM_WITH_SETJMP */
11815
11816#ifndef IEM_WITH_SETJMP
11817# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11818 do { \
11819 uint8_t u8Tmp; \
11820 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11821 (a_u16Dst) = (int8_t)u8Tmp; \
11822 } while (0)
11823# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11824 do { \
11825 uint8_t u8Tmp; \
11826 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11827 (a_u32Dst) = (int8_t)u8Tmp; \
11828 } while (0)
11829# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11830 do { \
11831 uint8_t u8Tmp; \
11832 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11833 (a_u64Dst) = (int8_t)u8Tmp; \
11834 } while (0)
11835# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11836 do { \
11837 uint16_t u16Tmp; \
11838 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11839 (a_u32Dst) = (int16_t)u16Tmp; \
11840 } while (0)
11841# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11842 do { \
11843 uint16_t u16Tmp; \
11844 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11845 (a_u64Dst) = (int16_t)u16Tmp; \
11846 } while (0)
11847# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11848 do { \
11849 uint32_t u32Tmp; \
11850 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11851 (a_u64Dst) = (int32_t)u32Tmp; \
11852 } while (0)
11853#else /* IEM_WITH_SETJMP */
11854# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11855 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11856# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11857 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11858# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11859 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11860# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11861 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11862# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11863 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11864# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11865 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11866#endif /* IEM_WITH_SETJMP */
11867
11868#ifndef IEM_WITH_SETJMP
11869# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11870 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11871# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11872 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11873# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11874 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11875# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11876 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11877#else
11878# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11879 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11880# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11881 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11882# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11883 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11884# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11885 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11886#endif
11887
11888#ifndef IEM_WITH_SETJMP
11889# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11890 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11891# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11892 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11893# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11894 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11895# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11896 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11897#else
11898# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11899 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11900# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11901 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11902# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11903 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11904# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11905 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11906#endif
11907
11908#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11909#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11910#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11911#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11912#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11913#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11914#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11915 do { \
11916 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11917 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11918 } while (0)
11919
11920#ifndef IEM_WITH_SETJMP
11921# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11922 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11923# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11924 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11925#else
11926# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11927 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11928# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11929 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11930#endif
11931
11932#ifndef IEM_WITH_SETJMP
11933# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11934 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11935# define IEM_MC_STORE_MEM_U256_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u256Value) \
11936 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11937#else
11938# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11939 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11940# define IEM_MC_STORE_MEM_U256_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u256Value) \
11941 iemMemStoreDataU256AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11942#endif
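
/* Usage sketch for the store macros above. This is illustrative only, not lifted
 * from the opcode tables: bRm and the IEM_MC_BEGIN/LOCAL/FETCH/ADVANCE building
 * blocks are assumed to be available in the decoder function the block lives in. */
#if 0 /* example: store a general register to a ModR/M memory operand */
    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(uint16_t, u16Value);
    IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_FETCH_GREG_U16(u16Value, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    IEM_MC_STORE_MEM_U16(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u16Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif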
11943
11944
11945#define IEM_MC_PUSH_U16(a_u16Value) \
11946 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11947#define IEM_MC_PUSH_U32(a_u32Value) \
11948 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11949#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11950 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11951#define IEM_MC_PUSH_U64(a_u64Value) \
11952 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11953
11954#define IEM_MC_POP_U16(a_pu16Value) \
11955 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11956#define IEM_MC_POP_U32(a_pu32Value) \
11957 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11958#define IEM_MC_POP_U64(a_pu64Value) \
11959 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
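
/* Usage sketch for the stack push/pop wrappers above (illustrative; bRm and the
 * other IEM_MC_* building blocks are assumed from the usual decoder context). */
#if 0 /* example: push a 16-bit general register */
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Value);
    IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
    IEM_MC_PUSH_U16(u16Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif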
11960
11961/** Maps guest memory for direct or bounce buffered access.
11962 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11963 * @remarks May return.
11964 */
11965#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11966 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11967
11968/** Maps guest memory for direct or bounce buffered access.
11969 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11970 * @remarks May return.
11971 */
11972#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11973 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11974
11975/** Commits the memory and unmaps the guest memory.
11976 * @remarks May return.
11977 */
11978#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11979 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11980
11981/** Commits the memory and unmaps the guest memory, unless the FPU status word
11982 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11983 * that would prevent the store from taking place.
11984 *
11985 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11986 * store, while \#P will not.
11987 *
11988 * @remarks May in theory return - for now.
11989 */
11990#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11991 do { \
11992 if ( !(a_u16FSW & X86_FSW_ES) \
11993 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11994 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11995 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11996 } while (0)
11997
11998/** Calculate efficient address from R/M. */
11999#ifndef IEM_WITH_SETJMP
12000# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12001 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12002#else
12003# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12004 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12005#endif
12006
12007#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12008#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12009#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12010#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12011#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12012#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12013#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
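
/* Sketch of how the mapping macros and the AIMPL call macros above combine for a
 * typical read-modify-write memory operand. Illustrative only: the EFLAGS helper
 * macros and the iemAImpl_add_u16 worker are the usual IEM names, assumed here
 * rather than defined in this file. */
#if 0 /* example: 16-bit binary op with a memory destination */
    IEM_MC_BEGIN(3, 2);
    IEM_MC_ARG(uint16_t *,       pu16Dst,         0);
    IEM_MC_ARG(uint16_t,         u16Src,          1);
    IEM_MC_ARG_LOCAL_EFLAGS(     pEFlags, EFlags, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
    IEM_MC_FETCH_GREG_U16(u16Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    IEM_MC_FETCH_EFLAGS(EFlags);
    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
    IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
    IEM_MC_COMMIT_EFLAGS(EFlags);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif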
12014
12015/**
12016 * Defers the rest of the instruction emulation to a C implementation routine
12017 * and returns, only taking the standard parameters.
12018 *
12019 * @param a_pfnCImpl The pointer to the C routine.
12020 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12021 */
12022#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12023
12024/**
12025 * Defers the rest of instruction emulation to a C implementation routine and
12026 * returns, taking one argument in addition to the standard ones.
12027 *
12028 * @param a_pfnCImpl The pointer to the C routine.
12029 * @param a0 The argument.
12030 */
12031#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12032
12033/**
12034 * Defers the rest of the instruction emulation to a C implementation routine
12035 * and returns, taking two arguments in addition to the standard ones.
12036 *
12037 * @param a_pfnCImpl The pointer to the C routine.
12038 * @param a0 The first extra argument.
12039 * @param a1 The second extra argument.
12040 */
12041#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12042
12043/**
12044 * Defers the rest of the instruction emulation to a C implementation routine
12045 * and returns, taking three arguments in addition to the standard ones.
12046 *
12047 * @param a_pfnCImpl The pointer to the C routine.
12048 * @param a0 The first extra argument.
12049 * @param a1 The second extra argument.
12050 * @param a2 The third extra argument.
12051 */
12052#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12053
12054/**
12055 * Defers the rest of the instruction emulation to a C implementation routine
12056 * and returns, taking four arguments in addition to the standard ones.
12057 *
12058 * @param a_pfnCImpl The pointer to the C routine.
12059 * @param a0 The first extra argument.
12060 * @param a1 The second extra argument.
12061 * @param a2 The third extra argument.
12062 * @param a3 The fourth extra argument.
12063 */
12064#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12065
12066/**
12067 * Defers the rest of the instruction emulation to a C implementation routine
12068 * and returns, taking two arguments in addition to the standard ones.
12069 *
12070 * @param a_pfnCImpl The pointer to the C routine.
12071 * @param a0 The first extra argument.
12072 * @param a1 The second extra argument.
12073 * @param a2 The third extra argument.
12074 * @param a3 The fourth extra argument.
12075 * @param a4 The fifth extra argument.
12076 */
12077#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12078
12079/**
12080 * Defers the entire instruction emulation to a C implementation routine and
12081 * returns, only taking the standard parameters.
12082 *
12083 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12084 *
12085 * @param a_pfnCImpl The pointer to the C routine.
12086 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12087 */
12088#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12089
12090/**
12091 * Defers the entire instruction emulation to a C implementation routine and
12092 * returns, taking one argument in addition to the standard ones.
12093 *
12094 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12095 *
12096 * @param a_pfnCImpl The pointer to the C routine.
12097 * @param a0 The argument.
12098 */
12099#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12100
12101/**
12102 * Defers the entire instruction emulation to a C implementation routine and
12103 * returns, taking two arguments in addition to the standard ones.
12104 *
12105 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12106 *
12107 * @param a_pfnCImpl The pointer to the C routine.
12108 * @param a0 The first extra argument.
12109 * @param a1 The second extra argument.
12110 */
12111#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12112
12113/**
12114 * Defers the entire instruction emulation to a C implementation routine and
12115 * returns, taking three arguments in addition to the standard ones.
12116 *
12117 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12118 *
12119 * @param a_pfnCImpl The pointer to the C routine.
12120 * @param a0 The first extra argument.
12121 * @param a1 The second extra argument.
12122 * @param a2 The third extra argument.
12123 */
12124#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
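
/* Sketch of the two C-implementation call styles above (illustrative; iemCImpl_hlt
 * and iemCImpl_FarJmp stand in for C workers declared elsewhere, and the argument
 * names are assumptions from the usual decoder context). */
#if 0
    /* Whole instruction handed over; no IEM_MC_BEGIN/IEM_MC_END around it: */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);

    /* Alternatively, the tail of an IEM_MC block is handed over after the operands
       were decoded (the IEM_MC_CALL_CIMPL_* macros contain the return themselves): */
    IEM_MC_CALL_CIMPL_3(iemCImpl_FarJmp, u16Sel, offSeg, pVCpu->iem.s.enmEffOpSize);
#endif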
12125
12126/**
12127 * Calls an FPU assembly implementation taking one visible argument.
12128 *
12129 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12130 * @param a0 The first extra argument.
12131 */
12132#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12133 do { \
12134 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
12135 } while (0)
12136
12137/**
12138 * Calls an FPU assembly implementation taking two visible arguments.
12139 *
12140 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12141 * @param a0 The first extra argument.
12142 * @param a1 The second extra argument.
12143 */
12144#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12145 do { \
12146 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12147 } while (0)
12148
12149/**
12150 * Calls an FPU assembly implementation taking three visible arguments.
12151 *
12152 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12153 * @param a0 The first extra argument.
12154 * @param a1 The second extra argument.
12155 * @param a2 The third extra argument.
12156 */
12157#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12158 do { \
12159 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12160 } while (0)
12161
12162#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12163 do { \
12164 (a_FpuData).FSW = (a_FSW); \
12165 (a_FpuData).r80Result = *(a_pr80Value); \
12166 } while (0)
12167
12168/** Pushes FPU result onto the stack. */
12169#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12170 iemFpuPushResult(pVCpu, &a_FpuData)
12171/** Pushes FPU result onto the stack and sets the FPUDP. */
12172#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12173 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12174
12175/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12176#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12177 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12178
12179/** Stores FPU result in a stack register. */
12180#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12181 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12182/** Stores FPU result in a stack register and pops the stack. */
12183#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12184 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12185/** Stores FPU result in a stack register and sets the FPUDP. */
12186#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12187 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12188/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12189 * stack. */
12190#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12191 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12192
12193/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12194#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12195 iemFpuUpdateOpcodeAndIp(pVCpu)
12196/** Free a stack register (for FFREE and FFREEP). */
12197#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12198 iemFpuStackFree(pVCpu, a_iStReg)
12199/** Increment the FPU stack pointer. */
12200#define IEM_MC_FPU_STACK_INC_TOP() \
12201 iemFpuStackIncTop(pVCpu)
12202/** Decrement the FPU stack pointer. */
12203#define IEM_MC_FPU_STACK_DEC_TOP() \
12204 iemFpuStackDecTop(pVCpu)
12205
12206/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12207#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12208 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12209/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12210#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12211 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12212/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12213#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12214 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12215/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12216#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12217 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12218/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12219 * stack. */
12220#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12221 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12222/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12223#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12224 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12225
12226/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12227#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12228 iemFpuStackUnderflow(pVCpu, a_iStDst)
12229/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12230 * stack. */
12231#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12232 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12233/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12234 * FPUDS. */
12235#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12236 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12237/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12238 * FPUDS. Pops stack. */
12239#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12240 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12241/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12242 * stack twice. */
12243#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12244 iemFpuStackUnderflowThenPopPop(pVCpu)
12245/** Raises an FPU stack underflow exception for an instruction pushing a result
12246 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12247#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12248 iemFpuStackPushUnderflow(pVCpu)
12249/** Raises an FPU stack underflow exception for an instruction pushing a result
12250 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12251#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12252 iemFpuStackPushUnderflowTwo(pVCpu)
12253
12254/** Raises an FPU stack overflow exception as part of a push attempt. Sets
12255 * FPUIP, FPUCS and FOP. */
12256#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12257 iemFpuStackPushOverflow(pVCpu)
12258/** Raises an FPU stack overflow exception as part of a push attempt. Sets
12259 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12260#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12261 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12262/** Prepares for using the FPU state.
12263 * Ensures that we can use the host FPU in the current context (RC+R0).
12264 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12265#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12266/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12267#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12268/** Actualizes the guest FPU state so it can be accessed and modified. */
12269#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12270
12271/** Prepares for using the SSE state.
12272 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12273 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12274#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12275/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12276#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12277/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12278#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12279
12280/** Prepares for using the AVX state.
12281 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12282 * Ensures the guest AVX state in the CPUMCTX is up to date.
12283 * @note This will include the AVX512 state too when support for it is added
12284 * due to the zero-extending behaviour of VEX instructions. */
12285#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12286/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12287#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12288/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12289#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12290
12291/**
12292 * Calls an MMX assembly implementation taking two visible arguments.
12293 *
12294 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12295 * @param a0 The first extra argument.
12296 * @param a1 The second extra argument.
12297 */
12298#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12299 do { \
12300 IEM_MC_PREPARE_FPU_USAGE(); \
12301 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12302 } while (0)
12303
12304/**
12305 * Calls an MMX assembly implementation taking three visible arguments.
12306 *
12307 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12308 * @param a0 The first extra argument.
12309 * @param a1 The second extra argument.
12310 * @param a2 The third extra argument.
12311 */
12312#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12313 do { \
12314 IEM_MC_PREPARE_FPU_USAGE(); \
12315 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12316 } while (0)
12317
12318
12319/**
12320 * Calls an SSE assembly implementation taking two visible arguments.
12321 *
12322 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12323 * @param a0 The first extra argument.
12324 * @param a1 The second extra argument.
12325 */
12326#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12327 do { \
12328 IEM_MC_PREPARE_SSE_USAGE(); \
12329 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12330 } while (0)
12331
12332/**
12333 * Calls an SSE assembly implementation taking three visible arguments.
12334 *
12335 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12336 * @param a0 The first extra argument.
12337 * @param a1 The second extra argument.
12338 * @param a2 The third extra argument.
12339 */
12340#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12341 do { \
12342 IEM_MC_PREPARE_SSE_USAGE(); \
12343 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12344 } while (0)
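
/* Sketch of a full-register SSE operation using the call macro above. Illustrative
 * only: the RAISE/REF_XREG macros and the iemAImpl_pxor_u128 worker live elsewhere
 * in IEM and are assumed here; the exact argument types may differ in detail. */
#if 0 /* example: xmm,xmm form of a 128-bit logical op */
    IEM_MC_BEGIN(2, 0);
    IEM_MC_ARG(uint128_t *,       pDst, 0);
    IEM_MC_ARG(uint128_t const *, pSrc, 1);
    IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
    IEM_MC_PREPARE_SSE_USAGE();
    IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
    IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
    IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pxor_u128, pDst, pSrc);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif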
12345
12346
12347/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12348 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12349#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12350 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState), 0)
12351
12352/**
12353 * Calls an AVX assembly implementation taking two visible arguments.
12354 *
12355 * There is one implicit zero'th argument, a pointer to the extended state.
12356 *
12357 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12358 * @param a1 The first extra argument.
12359 * @param a2 The second extra argument.
12360 */
12361#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12362 do { \
12363 IEM_MC_PREPARE_AVX_USAGE(); \
12364 a_pfnAImpl(pXState, (a1), (a2)); \
12365 } while (0)
12366
12367/**
12368 * Calls an AVX assembly implementation taking three visible arguments.
12369 *
12370 * There is one implicit zero'th argument, a pointer to the extended state.
12371 *
12372 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12373 * @param a1 The first extra argument.
12374 * @param a2 The second extra argument.
12375 * @param a3 The third extra argument.
12376 */
12377#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12378 do { \
12379 IEM_MC_PREPARE_AVX_USAGE(); \
12380 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12381 } while (0)
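
/* Sketch of the AVX call convention above. Illustrative only: iemAImpl_vpand_u256
 * is a hypothetical worker name used purely to show the shape of the call, and the
 * register indices are guesses at how a VEX decoder would supply them. */
#if 0
    IEM_MC_BEGIN(4, 0);
    IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
    IEM_MC_ARG_CONST(uint8_t, iYRegDst,  ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, 1);
    IEM_MC_ARG_CONST(uint8_t, iYRegSrc1, pVCpu->iem.s.uVex3rdReg,                                                     2);
    IEM_MC_ARG_CONST(uint8_t, iYRegSrc2, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB,                              3);
    IEM_MC_CALL_AVX_AIMPL_3(iemAImpl_vpand_u256, iYRegDst, iYRegSrc1, iYRegSrc2);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif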
12382
12383/** @note Not for IOPL or IF testing. */
12384#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
12385/** @note Not for IOPL or IF testing. */
12386#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
12387/** @note Not for IOPL or IF testing. */
12388#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
12389/** @note Not for IOPL or IF testing. */
12390#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
12391/** @note Not for IOPL or IF testing. */
12392#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12393 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12394 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12395/** @note Not for IOPL or IF testing. */
12396#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12397 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12398 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12399/** @note Not for IOPL or IF testing. */
12400#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12401 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12402 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12403 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12404/** @note Not for IOPL or IF testing. */
12405#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12406 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12407 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12408 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12409#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
12410#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
12411#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
12412/** @note Not for IOPL or IF testing. */
12413#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12414 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12415 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12416/** @note Not for IOPL or IF testing. */
12417#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12418 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12419 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12420/** @note Not for IOPL or IF testing. */
12421#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12422 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12423 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12424/** @note Not for IOPL or IF testing. */
12425#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12426 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12427 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12428/** @note Not for IOPL or IF testing. */
12429#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12430 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12431 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12432/** @note Not for IOPL or IF testing. */
12433#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12434 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12435 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12436#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12437#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12438
12439#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12440 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12441#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12442 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12443#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12444 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12445#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12446 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12447#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12448 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12449#define IEM_MC_IF_FCW_IM() \
12450 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12451
12452#define IEM_MC_ELSE() } else {
12453#define IEM_MC_ENDIF() } do {} while (0)
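
/* Sketch tying the FPU call, result and conditional macros above together the way
 * the x87 decoders typically do (illustrative; iemAImpl_fadd_r80_by_r80 and the
 * MAYBE_RAISE/ARG_LOCAL_REF macros are the usual IEM names, assumed rather than
 * defined in this file). */
#if 0 /* example: st0 = st0 <op> st(i) */
    IEM_MC_BEGIN(3, 1);
    IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
        IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif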
12454
12455/** @} */
12456
12457
12458/** @name Opcode Debug Helpers.
12459 * @{
12460 */
12461#ifdef VBOX_WITH_STATISTICS
12462# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12463#else
12464# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12465#endif
12466
12467#ifdef DEBUG
12468# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12469 do { \
12470 IEMOP_INC_STATS(a_Stats); \
12471 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
12472 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12473 } while (0)
12474
12475# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12476 do { \
12477 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12478 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12479 (void)RT_CONCAT(OP_,a_Upper); \
12480 (void)(a_fDisHints); \
12481 (void)(a_fIemHints); \
12482 } while (0)
12483
12484# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12485 do { \
12486 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12487 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12488 (void)RT_CONCAT(OP_,a_Upper); \
12489 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12490 (void)(a_fDisHints); \
12491 (void)(a_fIemHints); \
12492 } while (0)
12493
12494# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12495 do { \
12496 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12497 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12498 (void)RT_CONCAT(OP_,a_Upper); \
12499 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12500 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12501 (void)(a_fDisHints); \
12502 (void)(a_fIemHints); \
12503 } while (0)
12504
12505# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12506 do { \
12507 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12508 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12509 (void)RT_CONCAT(OP_,a_Upper); \
12510 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12511 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12512 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12513 (void)(a_fDisHints); \
12514 (void)(a_fIemHints); \
12515 } while (0)
12516
12517# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12518 do { \
12519 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12520 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12521 (void)RT_CONCAT(OP_,a_Upper); \
12522 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12523 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12524 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12525 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12526 (void)(a_fDisHints); \
12527 (void)(a_fIemHints); \
12528 } while (0)
12529
12530#else
12531# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12532
12533# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12534 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12535# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12536 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12537# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12538 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12539# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12540 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12541# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12542 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12543
12544#endif
12545
12546#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12547 IEMOP_MNEMONIC0EX(a_Lower, \
12548 #a_Lower, \
12549 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12550#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12551 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12552 #a_Lower " " #a_Op1, \
12553 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12554#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12555 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12556 #a_Lower " " #a_Op1 "," #a_Op2, \
12557 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12558#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12559 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12560 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12561 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12562#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12563 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12564 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12565 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
12566
12567/** @} */
12568
12569
12570/** @name Opcode Helpers.
12571 * @{
12572 */
12573
12574#ifdef IN_RING3
12575# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12576 do { \
12577 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12578 else \
12579 { \
12580 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12581 return IEMOP_RAISE_INVALID_OPCODE(); \
12582 } \
12583 } while (0)
12584#else
12585# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12586 do { \
12587 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12588 else return IEMOP_RAISE_INVALID_OPCODE(); \
12589 } while (0)
12590#endif
12591
12592/** The instruction requires a 186 or later. */
12593#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12594# define IEMOP_HLP_MIN_186() do { } while (0)
12595#else
12596# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12597#endif
12598
12599/** The instruction requires a 286 or later. */
12600#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12601# define IEMOP_HLP_MIN_286() do { } while (0)
12602#else
12603# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12604#endif
12605
12606/** The instruction requires a 386 or later. */
12607#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12608# define IEMOP_HLP_MIN_386() do { } while (0)
12609#else
12610# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12611#endif
12612
12613/** The instruction requires a 386 or later if the given expression is true. */
12614#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12615# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12616#else
12617# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12618#endif
12619
12620/** The instruction requires a 486 or later. */
12621#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12622# define IEMOP_HLP_MIN_486() do { } while (0)
12623#else
12624# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12625#endif
12626
12627/** The instruction requires a Pentium (586) or later. */
12628#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12629# define IEMOP_HLP_MIN_586() do { } while (0)
12630#else
12631# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12632#endif
12633
12634/** The instruction requires a PentiumPro (686) or later. */
12635#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12636# define IEMOP_HLP_MIN_686() do { } while (0)
12637#else
12638# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12639#endif
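
/* Sketch of how the mnemonic and minimum-CPU helpers above sit at the top of an
 * opcode decoder function. Illustrative only: FNIEMOP_DEF comes from the opcode
 * table sources, the stub name is made up, and the hint flags are simply 0 here. */
#if 0
FNIEMOP_DEF(iemOp_example_stub)
{
    IEMOP_MNEMONIC2(RM, MOVZX, movzx, Gv, Eb, DISOPTYPE_HARMLESS, 0);
    IEMOP_HLP_MIN_386(); /* movzx was introduced with the 386 */
    /* ... ModR/M decoding and the IEM_MC block would follow here ... */
    return VINF_SUCCESS;
}
#endif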
12640
12641
12642/** The instruction raises an \#UD in real and V8086 mode. */
12643#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12644 do \
12645 { \
12646 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12647 else return IEMOP_RAISE_INVALID_OPCODE(); \
12648 } while (0)
12649
12650/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12651 * 64-bit mode. */
12652#define IEMOP_HLP_NO_64BIT() \
12653 do \
12654 { \
12655 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12656 return IEMOP_RAISE_INVALID_OPCODE(); \
12657 } while (0)
12658
12659/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12660 * 64-bit mode. */
12661#define IEMOP_HLP_ONLY_64BIT() \
12662 do \
12663 { \
12664 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12665 return IEMOP_RAISE_INVALID_OPCODE(); \
12666 } while (0)
12667
12668/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12669#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12670 do \
12671 { \
12672 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12673 iemRecalEffOpSize64Default(pVCpu); \
12674 } while (0)
12675
12676/** The instruction has 64-bit operand size if 64-bit mode. */
12677#define IEMOP_HLP_64BIT_OP_SIZE() \
12678 do \
12679 { \
12680 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12681 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12682 } while (0)
12683
12684/** Only a REX prefix immediately preceding the first opcode byte takes
12685 * effect. This macro helps ensure this as well as logging bad guest code. */
12686#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12687 do \
12688 { \
12689 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12690 { \
12691 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12692 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12693 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12694 pVCpu->iem.s.uRexB = 0; \
12695 pVCpu->iem.s.uRexIndex = 0; \
12696 pVCpu->iem.s.uRexReg = 0; \
12697 iemRecalEffOpSize(pVCpu); \
12698 } \
12699 } while (0)
12700
12701/**
12702 * Done decoding.
12703 */
12704#define IEMOP_HLP_DONE_DECODING() \
12705 do \
12706 { \
12707 /*nothing for now, maybe later... */ \
12708 } while (0)
12709
12710/**
12711 * Done decoding, raise \#UD exception if lock prefix present.
12712 */
12713#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12714 do \
12715 { \
12716 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12717 { /* likely */ } \
12718 else \
12719 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12720 } while (0)
12721
12722
12723/**
12724 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12725 * repnz or size prefixes are present, or if in real or v8086 mode.
12726 */
12727#define IEMOP_HLP_DONE_VEX_DECODING() \
12728 do \
12729 { \
12730 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12731 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12732 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12733 { /* likely */ } \
12734 else \
12735 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12736 } while (0)
12737
12738/**
12739 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12740 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not 0.
12741 */
12742#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12743 do \
12744 { \
12745 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12746 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12747 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12748 && pVCpu->iem.s.uVexLength == 0)) \
12749 { /* likely */ } \
12750 else \
12751 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12752 } while (0)
12753
12754
12755/**
12756 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12757 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12758 * register 0, or if in real or v8086 mode.
12759 */
12760#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12761 do \
12762 { \
12763 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12764 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12765 && !pVCpu->iem.s.uVex3rdReg \
12766 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12767 { /* likely */ } \
12768 else \
12769 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12770 } while (0)
12771
12772/**
12773 * Done decoding VEX, no V, L=0.
12774 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12775 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12776 */
12777#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12778 do \
12779 { \
12780 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12781 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12782 && pVCpu->iem.s.uVexLength == 0 \
12783 && pVCpu->iem.s.uVex3rdReg == 0 \
12784 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12785 { /* likely */ } \
12786 else \
12787 return IEMOP_RAISE_INVALID_OPCODE(); \
12788 } while (0)
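
/* Sketch of where the VEX decode checks above go in a VEX opcode decoder
 * (illustrative; the surrounding statements are assumptions, not taken from the
 * VEX opcode maps). */
#if 0
    IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV();
    IEM_MC_BEGIN(0, 0);
    /* ... the actual instruction body ... */
    IEM_MC_END();
#endif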
12789
12790#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12791 do \
12792 { \
12793 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12794 { /* likely */ } \
12795 else \
12796 { \
12797 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12798 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12799 } \
12800 } while (0)
12801#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12802 do \
12803 { \
12804 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12805 { /* likely */ } \
12806 else \
12807 { \
12808 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12809 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12810 } \
12811 } while (0)
12812
12813/**
12814 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12815 * are present.
12816 */
12817#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12818 do \
12819 { \
12820 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12821 { /* likely */ } \
12822 else \
12823 return IEMOP_RAISE_INVALID_OPCODE(); \
12824 } while (0)
12825
12826
12827#ifdef VBOX_WITH_NESTED_HWVIRT
12828/** Checks and handles SVM nested-guest control & instruction intercepts. */
12829# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12830 do \
12831 { \
12832 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12833 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12834 } while (0)
12835
12836/** Checks and handles the SVM nested-guest read intercept for the given control register. */
12837# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12838 do \
12839 { \
12840 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12841 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12842 } while (0)
12843
12844#else /* !VBOX_WITH_NESTED_HWVIRT */
12845# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12846# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12847#endif /* !VBOX_WITH_NESTED_HWVIRT */
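
/* Sketch of the SVM intercept helper above as used early in an instruction decoder
 * (illustrative; SVM_CTRL_INTERCEPT_RDTSC and SVM_EXIT_RDTSC are the usual SVM
 * constants from the hardware headers, assumed rather than defined here). */
#if 0
    IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSC, SVM_EXIT_RDTSC, 0 /*uExitInfo1*/, 0 /*uExitInfo2*/);
#endif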
12848
12849
12850/**
12851 * Calculates the effective address of a ModR/M memory operand.
12852 *
12853 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12854 *
12855 * @return Strict VBox status code.
12856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12857 * @param bRm The ModRM byte.
12858 * @param cbImm The size of any immediate following the
12859 * effective address opcode bytes. Important for
12860 * RIP relative addressing.
12861 * @param pGCPtrEff Where to return the effective address.
12862 */
12863IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12864{
12865 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12866 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12867# define SET_SS_DEF() \
12868 do \
12869 { \
12870 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12871 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12872 } while (0)
12873
12874 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12875 {
12876/** @todo Check the effective address size crap! */
12877 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12878 {
12879 uint16_t u16EffAddr;
12880
12881 /* Handle the disp16 form with no registers first. */
12882 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12883 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12884 else
12885 {
12886 /* Get the displacement. */
12887 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12888 {
12889 case 0: u16EffAddr = 0; break;
12890 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12891 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12892 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12893 }
12894
12895 /* Add the base and index registers to the disp. */
12896 switch (bRm & X86_MODRM_RM_MASK)
12897 {
12898 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12899 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12900 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12901 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12902 case 4: u16EffAddr += pCtx->si; break;
12903 case 5: u16EffAddr += pCtx->di; break;
12904 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12905 case 7: u16EffAddr += pCtx->bx; break;
12906 }
12907 }
12908
12909 *pGCPtrEff = u16EffAddr;
12910 }
12911 else
12912 {
12913 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12914 uint32_t u32EffAddr;
12915
12916 /* Handle the disp32 form with no registers first. */
12917 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12918 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12919 else
12920 {
12921 /* Get the register (or SIB) value. */
12922 switch ((bRm & X86_MODRM_RM_MASK))
12923 {
12924 case 0: u32EffAddr = pCtx->eax; break;
12925 case 1: u32EffAddr = pCtx->ecx; break;
12926 case 2: u32EffAddr = pCtx->edx; break;
12927 case 3: u32EffAddr = pCtx->ebx; break;
12928 case 4: /* SIB */
12929 {
12930 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12931
12932 /* Get the index and scale it. */
12933 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12934 {
12935 case 0: u32EffAddr = pCtx->eax; break;
12936 case 1: u32EffAddr = pCtx->ecx; break;
12937 case 2: u32EffAddr = pCtx->edx; break;
12938 case 3: u32EffAddr = pCtx->ebx; break;
12939 case 4: u32EffAddr = 0; /*none */ break;
12940 case 5: u32EffAddr = pCtx->ebp; break;
12941 case 6: u32EffAddr = pCtx->esi; break;
12942 case 7: u32EffAddr = pCtx->edi; break;
12943 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12944 }
12945 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12946
12947 /* add base */
12948 switch (bSib & X86_SIB_BASE_MASK)
12949 {
12950 case 0: u32EffAddr += pCtx->eax; break;
12951 case 1: u32EffAddr += pCtx->ecx; break;
12952 case 2: u32EffAddr += pCtx->edx; break;
12953 case 3: u32EffAddr += pCtx->ebx; break;
12954 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12955 case 5:
12956 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12957 {
12958 u32EffAddr += pCtx->ebp;
12959 SET_SS_DEF();
12960 }
12961 else
12962 {
12963 uint32_t u32Disp;
12964 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12965 u32EffAddr += u32Disp;
12966 }
12967 break;
12968 case 6: u32EffAddr += pCtx->esi; break;
12969 case 7: u32EffAddr += pCtx->edi; break;
12970 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12971 }
12972 break;
12973 }
12974 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12975 case 6: u32EffAddr = pCtx->esi; break;
12976 case 7: u32EffAddr = pCtx->edi; break;
12977 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12978 }
12979
12980 /* Get and add the displacement. */
12981 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12982 {
12983 case 0:
12984 break;
12985 case 1:
12986 {
12987 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12988 u32EffAddr += i8Disp;
12989 break;
12990 }
12991 case 2:
12992 {
12993 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12994 u32EffAddr += u32Disp;
12995 break;
12996 }
12997 default:
12998 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12999 }
13000
13001 }
13002 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13003 *pGCPtrEff = u32EffAddr;
13004 else
13005 {
13006 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13007 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13008 }
13009 }
13010 }
13011 else
13012 {
13013 uint64_t u64EffAddr;
13014
13015 /* Handle the rip+disp32 form with no registers first. */
13016 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13017 {
13018 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13019 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13020 }
13021 else
13022 {
13023 /* Get the register (or SIB) value. */
13024 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13025 {
13026 case 0: u64EffAddr = pCtx->rax; break;
13027 case 1: u64EffAddr = pCtx->rcx; break;
13028 case 2: u64EffAddr = pCtx->rdx; break;
13029 case 3: u64EffAddr = pCtx->rbx; break;
13030 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13031 case 6: u64EffAddr = pCtx->rsi; break;
13032 case 7: u64EffAddr = pCtx->rdi; break;
13033 case 8: u64EffAddr = pCtx->r8; break;
13034 case 9: u64EffAddr = pCtx->r9; break;
13035 case 10: u64EffAddr = pCtx->r10; break;
13036 case 11: u64EffAddr = pCtx->r11; break;
13037 case 13: u64EffAddr = pCtx->r13; break;
13038 case 14: u64EffAddr = pCtx->r14; break;
13039 case 15: u64EffAddr = pCtx->r15; break;
13040 /* SIB */
13041 case 4:
13042 case 12:
13043 {
13044 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13045
13046 /* Get the index and scale it. */
13047 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13048 {
13049 case 0: u64EffAddr = pCtx->rax; break;
13050 case 1: u64EffAddr = pCtx->rcx; break;
13051 case 2: u64EffAddr = pCtx->rdx; break;
13052 case 3: u64EffAddr = pCtx->rbx; break;
13053 case 4: u64EffAddr = 0; /*none */ break;
13054 case 5: u64EffAddr = pCtx->rbp; break;
13055 case 6: u64EffAddr = pCtx->rsi; break;
13056 case 7: u64EffAddr = pCtx->rdi; break;
13057 case 8: u64EffAddr = pCtx->r8; break;
13058 case 9: u64EffAddr = pCtx->r9; break;
13059 case 10: u64EffAddr = pCtx->r10; break;
13060 case 11: u64EffAddr = pCtx->r11; break;
13061 case 12: u64EffAddr = pCtx->r12; break;
13062 case 13: u64EffAddr = pCtx->r13; break;
13063 case 14: u64EffAddr = pCtx->r14; break;
13064 case 15: u64EffAddr = pCtx->r15; break;
13065 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13066 }
13067 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13068
13069 /* add base */
13070 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13071 {
13072 case 0: u64EffAddr += pCtx->rax; break;
13073 case 1: u64EffAddr += pCtx->rcx; break;
13074 case 2: u64EffAddr += pCtx->rdx; break;
13075 case 3: u64EffAddr += pCtx->rbx; break;
13076 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13077 case 6: u64EffAddr += pCtx->rsi; break;
13078 case 7: u64EffAddr += pCtx->rdi; break;
13079 case 8: u64EffAddr += pCtx->r8; break;
13080 case 9: u64EffAddr += pCtx->r9; break;
13081 case 10: u64EffAddr += pCtx->r10; break;
13082 case 11: u64EffAddr += pCtx->r11; break;
13083 case 12: u64EffAddr += pCtx->r12; break;
13084 case 14: u64EffAddr += pCtx->r14; break;
13085 case 15: u64EffAddr += pCtx->r15; break;
13086 /* complicated encodings */
13087 case 5:
13088 case 13:
13089 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13090 {
13091 if (!pVCpu->iem.s.uRexB)
13092 {
13093 u64EffAddr += pCtx->rbp;
13094 SET_SS_DEF();
13095 }
13096 else
13097 u64EffAddr += pCtx->r13;
13098 }
13099 else
13100 {
13101 uint32_t u32Disp;
13102 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13103 u64EffAddr += (int32_t)u32Disp;
13104 }
13105 break;
13106 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13107 }
13108 break;
13109 }
13110 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13111 }
13112
13113 /* Get and add the displacement. */
13114 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13115 {
13116 case 0:
13117 break;
13118 case 1:
13119 {
13120 int8_t i8Disp;
13121 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13122 u64EffAddr += i8Disp;
13123 break;
13124 }
13125 case 2:
13126 {
13127 uint32_t u32Disp;
13128 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13129 u64EffAddr += (int32_t)u32Disp;
13130 break;
13131 }
13132 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13133 }
13134
13135 }
13136
13137 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13138 *pGCPtrEff = u64EffAddr;
13139 else
13140 {
13141 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13142 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13143 }
13144 }
13145
13146 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13147 return VINF_SUCCESS;
13148}
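
/* Worked example for the 16-bit path above (a sketch, not an actual caller): with
 * bRm=0x46 (mod=1, reg=0, rm=6) and a disp8 of 0x10 in the instruction stream, the
 * result is BP+0x10 and the default segment becomes SS via SET_SS_DEF(). */
#if 0
    RTGCPTR      GCPtrEff;
    VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, 0x46 /* [bp+disp8] */, 0 /*cbImm*/, &GCPtrEff);
#endif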
13149
13150
13151/**
13152 * Calculates the effective address of a ModR/M memory operand.
13153 *
13154 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13155 *
13156 * @return Strict VBox status code.
13157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13158 * @param bRm The ModRM byte.
13159 * @param cbImm The size of any immediate following the
13160 * effective address opcode bytes. Important for
13161 * RIP relative addressing.
13162 * @param pGCPtrEff Where to return the effective address.
13163 * @param offRsp RSP displacement.
13164 */
13165IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13166{
13167 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13168 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13169# define SET_SS_DEF() \
13170 do \
13171 { \
13172 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13173 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13174 } while (0)
13175
13176 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13177 {
13178/** @todo Check the effective address size crap! */
13179 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13180 {
13181 uint16_t u16EffAddr;
13182
13183 /* Handle the disp16 form with no registers first. */
13184 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13185 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13186 else
13187 {
13188 /* Get the displacement. */
13189 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13190 {
13191 case 0: u16EffAddr = 0; break;
13192 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13193 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13194 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13195 }
13196
13197 /* Add the base and index registers to the disp. */
13198 switch (bRm & X86_MODRM_RM_MASK)
13199 {
13200 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13201 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13202 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13203 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13204 case 4: u16EffAddr += pCtx->si; break;
13205 case 5: u16EffAddr += pCtx->di; break;
13206 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13207 case 7: u16EffAddr += pCtx->bx; break;
13208 }
13209 }
13210
13211 *pGCPtrEff = u16EffAddr;
13212 }
13213 else
13214 {
13215 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13216 uint32_t u32EffAddr;
13217
13218 /* Handle the disp32 form with no registers first. */
13219 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13220 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13221 else
13222 {
13223 /* Get the register (or SIB) value. */
13224 switch ((bRm & X86_MODRM_RM_MASK))
13225 {
13226 case 0: u32EffAddr = pCtx->eax; break;
13227 case 1: u32EffAddr = pCtx->ecx; break;
13228 case 2: u32EffAddr = pCtx->edx; break;
13229 case 3: u32EffAddr = pCtx->ebx; break;
13230 case 4: /* SIB */
13231 {
13232 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13233
13234 /* Get the index and scale it. */
13235 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13236 {
13237 case 0: u32EffAddr = pCtx->eax; break;
13238 case 1: u32EffAddr = pCtx->ecx; break;
13239 case 2: u32EffAddr = pCtx->edx; break;
13240 case 3: u32EffAddr = pCtx->ebx; break;
13241 case 4: u32EffAddr = 0; /*none */ break;
13242 case 5: u32EffAddr = pCtx->ebp; break;
13243 case 6: u32EffAddr = pCtx->esi; break;
13244 case 7: u32EffAddr = pCtx->edi; break;
13245 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13246 }
13247 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13248
13249 /* add base */
13250 switch (bSib & X86_SIB_BASE_MASK)
13251 {
13252 case 0: u32EffAddr += pCtx->eax; break;
13253 case 1: u32EffAddr += pCtx->ecx; break;
13254 case 2: u32EffAddr += pCtx->edx; break;
13255 case 3: u32EffAddr += pCtx->ebx; break;
13256 case 4:
13257 u32EffAddr += pCtx->esp + offRsp;
13258 SET_SS_DEF();
13259 break;
13260 case 5:
13261 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13262 {
13263 u32EffAddr += pCtx->ebp;
13264 SET_SS_DEF();
13265 }
13266 else
13267 {
13268 uint32_t u32Disp;
13269 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13270 u32EffAddr += u32Disp;
13271 }
13272 break;
13273 case 6: u32EffAddr += pCtx->esi; break;
13274 case 7: u32EffAddr += pCtx->edi; break;
13275 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13276 }
13277 break;
13278 }
13279 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13280 case 6: u32EffAddr = pCtx->esi; break;
13281 case 7: u32EffAddr = pCtx->edi; break;
13282 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13283 }
13284
13285 /* Get and add the displacement. */
13286 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13287 {
13288 case 0:
13289 break;
13290 case 1:
13291 {
13292 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13293 u32EffAddr += i8Disp;
13294 break;
13295 }
13296 case 2:
13297 {
13298 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13299 u32EffAddr += u32Disp;
13300 break;
13301 }
13302 default:
13303 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13304 }
13305
13306 }
13307 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13308 *pGCPtrEff = u32EffAddr;
13309 else
13310 {
13311 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13312 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13313 }
13314 }
13315 }
13316 else
13317 {
13318 uint64_t u64EffAddr;
13319
13320 /* Handle the rip+disp32 form with no registers first. */
13321 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13322 {
13323 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13324 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13325 }
13326 else
13327 {
13328 /* Get the register (or SIB) value. */
13329 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13330 {
13331 case 0: u64EffAddr = pCtx->rax; break;
13332 case 1: u64EffAddr = pCtx->rcx; break;
13333 case 2: u64EffAddr = pCtx->rdx; break;
13334 case 3: u64EffAddr = pCtx->rbx; break;
13335 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13336 case 6: u64EffAddr = pCtx->rsi; break;
13337 case 7: u64EffAddr = pCtx->rdi; break;
13338 case 8: u64EffAddr = pCtx->r8; break;
13339 case 9: u64EffAddr = pCtx->r9; break;
13340 case 10: u64EffAddr = pCtx->r10; break;
13341 case 11: u64EffAddr = pCtx->r11; break;
13342 case 13: u64EffAddr = pCtx->r13; break;
13343 case 14: u64EffAddr = pCtx->r14; break;
13344 case 15: u64EffAddr = pCtx->r15; break;
13345 /* SIB */
13346 case 4:
13347 case 12:
13348 {
13349 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13350
13351 /* Get the index and scale it. */
13352 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13353 {
13354 case 0: u64EffAddr = pCtx->rax; break;
13355 case 1: u64EffAddr = pCtx->rcx; break;
13356 case 2: u64EffAddr = pCtx->rdx; break;
13357 case 3: u64EffAddr = pCtx->rbx; break;
13358 case 4: u64EffAddr = 0; /*none */ break;
13359 case 5: u64EffAddr = pCtx->rbp; break;
13360 case 6: u64EffAddr = pCtx->rsi; break;
13361 case 7: u64EffAddr = pCtx->rdi; break;
13362 case 8: u64EffAddr = pCtx->r8; break;
13363 case 9: u64EffAddr = pCtx->r9; break;
13364 case 10: u64EffAddr = pCtx->r10; break;
13365 case 11: u64EffAddr = pCtx->r11; break;
13366 case 12: u64EffAddr = pCtx->r12; break;
13367 case 13: u64EffAddr = pCtx->r13; break;
13368 case 14: u64EffAddr = pCtx->r14; break;
13369 case 15: u64EffAddr = pCtx->r15; break;
13370 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13371 }
13372 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13373
13374 /* add base */
13375 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13376 {
13377 case 0: u64EffAddr += pCtx->rax; break;
13378 case 1: u64EffAddr += pCtx->rcx; break;
13379 case 2: u64EffAddr += pCtx->rdx; break;
13380 case 3: u64EffAddr += pCtx->rbx; break;
13381 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
13382 case 6: u64EffAddr += pCtx->rsi; break;
13383 case 7: u64EffAddr += pCtx->rdi; break;
13384 case 8: u64EffAddr += pCtx->r8; break;
13385 case 9: u64EffAddr += pCtx->r9; break;
13386 case 10: u64EffAddr += pCtx->r10; break;
13387 case 11: u64EffAddr += pCtx->r11; break;
13388 case 12: u64EffAddr += pCtx->r12; break;
13389 case 14: u64EffAddr += pCtx->r14; break;
13390 case 15: u64EffAddr += pCtx->r15; break;
13391 /* complicated encodings */
13392 case 5:
13393 case 13:
13394 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13395 {
13396 if (!pVCpu->iem.s.uRexB)
13397 {
13398 u64EffAddr += pCtx->rbp;
13399 SET_SS_DEF();
13400 }
13401 else
13402 u64EffAddr += pCtx->r13;
13403 }
13404 else
13405 {
13406 uint32_t u32Disp;
13407 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13408 u64EffAddr += (int32_t)u32Disp;
13409 }
13410 break;
13411 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13412 }
13413 break;
13414 }
13415 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13416 }
13417
13418 /* Get and add the displacement. */
13419 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13420 {
13421 case 0:
13422 break;
13423 case 1:
13424 {
13425 int8_t i8Disp;
13426 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13427 u64EffAddr += i8Disp;
13428 break;
13429 }
13430 case 2:
13431 {
13432 uint32_t u32Disp;
13433 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13434 u64EffAddr += (int32_t)u32Disp;
13435 break;
13436 }
13437 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13438 }
13439
13440 }
13441
13442 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13443 *pGCPtrEff = u64EffAddr;
13444 else
13445 {
13446 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13447 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13448 }
13449 }
13450
13451 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13452 return VINF_SUCCESS;
13453}
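/* Worked example (illustrative only, not taken from the original source): for the
 * 32-bit instruction "mov eax, [eax+ebx*2+0x10]" the decoder sees bRm=0x44
 * (mod=01, reg=000, rm=100 -> SIB follows), bSib=0x58 (scale=01 -> x2,
 * index=011 -> EBX, base=000 -> EAX) and an 8-bit displacement of 0x10.
 * The code above then computes u32EffAddr = (EBX << 1) + EAX + 0x10, and the
 * effective segment stays at the default since neither EBP nor ESP is used
 * as the base. */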
13454
13455
13456#ifdef IEM_WITH_SETJMP
13457/**
13458 * Calculates the effective address of a ModR/M memory operand.
13459 *
13460 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13461 *
13462 * May longjmp on internal error.
13463 *
13464 * @return The effective address.
13465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13466 * @param bRm The ModRM byte.
13467 * @param cbImm The size of any immediate following the
13468 * effective address opcode bytes. Important for
13469 * RIP relative addressing.
13470 */
13471IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13472{
13473 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13474 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13475# define SET_SS_DEF() \
13476 do \
13477 { \
13478 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13479 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13480 } while (0)
13481
13482 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13483 {
13484/** @todo Check the effective address size crap! */
13485 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13486 {
13487 uint16_t u16EffAddr;
13488
13489 /* Handle the disp16 form with no registers first. */
13490 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13491 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13492 else
13493 {
13494 /* Get the displacement. */
13495 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13496 {
13497 case 0: u16EffAddr = 0; break;
13498 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13499 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13500 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13501 }
13502
13503 /* Add the base and index registers to the disp. */
13504 switch (bRm & X86_MODRM_RM_MASK)
13505 {
13506 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13507 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13508 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13509 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13510 case 4: u16EffAddr += pCtx->si; break;
13511 case 5: u16EffAddr += pCtx->di; break;
13512 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13513 case 7: u16EffAddr += pCtx->bx; break;
13514 }
13515 }
13516
13517 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13518 return u16EffAddr;
13519 }
13520
13521 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13522 uint32_t u32EffAddr;
13523
13524 /* Handle the disp32 form with no registers first. */
13525 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13526 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13527 else
13528 {
13529 /* Get the register (or SIB) value. */
13530 switch ((bRm & X86_MODRM_RM_MASK))
13531 {
13532 case 0: u32EffAddr = pCtx->eax; break;
13533 case 1: u32EffAddr = pCtx->ecx; break;
13534 case 2: u32EffAddr = pCtx->edx; break;
13535 case 3: u32EffAddr = pCtx->ebx; break;
13536 case 4: /* SIB */
13537 {
13538 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13539
13540 /* Get the index and scale it. */
13541 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13542 {
13543 case 0: u32EffAddr = pCtx->eax; break;
13544 case 1: u32EffAddr = pCtx->ecx; break;
13545 case 2: u32EffAddr = pCtx->edx; break;
13546 case 3: u32EffAddr = pCtx->ebx; break;
13547 case 4: u32EffAddr = 0; /*none */ break;
13548 case 5: u32EffAddr = pCtx->ebp; break;
13549 case 6: u32EffAddr = pCtx->esi; break;
13550 case 7: u32EffAddr = pCtx->edi; break;
13551 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13552 }
13553 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13554
13555 /* add base */
13556 switch (bSib & X86_SIB_BASE_MASK)
13557 {
13558 case 0: u32EffAddr += pCtx->eax; break;
13559 case 1: u32EffAddr += pCtx->ecx; break;
13560 case 2: u32EffAddr += pCtx->edx; break;
13561 case 3: u32EffAddr += pCtx->ebx; break;
13562 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13563 case 5:
13564 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13565 {
13566 u32EffAddr += pCtx->ebp;
13567 SET_SS_DEF();
13568 }
13569 else
13570 {
13571 uint32_t u32Disp;
13572 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13573 u32EffAddr += u32Disp;
13574 }
13575 break;
13576 case 6: u32EffAddr += pCtx->esi; break;
13577 case 7: u32EffAddr += pCtx->edi; break;
13578 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13579 }
13580 break;
13581 }
13582 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13583 case 6: u32EffAddr = pCtx->esi; break;
13584 case 7: u32EffAddr = pCtx->edi; break;
13585 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13586 }
13587
13588 /* Get and add the displacement. */
13589 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13590 {
13591 case 0:
13592 break;
13593 case 1:
13594 {
13595 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13596 u32EffAddr += i8Disp;
13597 break;
13598 }
13599 case 2:
13600 {
13601 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13602 u32EffAddr += u32Disp;
13603 break;
13604 }
13605 default:
13606 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13607 }
13608 }
13609
13610 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13611 {
13612 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13613 return u32EffAddr;
13614 }
13615 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13616 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13617 return u32EffAddr & UINT16_MAX;
13618 }
13619
13620 uint64_t u64EffAddr;
13621
13622 /* Handle the rip+disp32 form with no registers first. */
13623 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13624 {
13625 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13626 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13627 }
13628 else
13629 {
13630 /* Get the register (or SIB) value. */
13631 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13632 {
13633 case 0: u64EffAddr = pCtx->rax; break;
13634 case 1: u64EffAddr = pCtx->rcx; break;
13635 case 2: u64EffAddr = pCtx->rdx; break;
13636 case 3: u64EffAddr = pCtx->rbx; break;
13637 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13638 case 6: u64EffAddr = pCtx->rsi; break;
13639 case 7: u64EffAddr = pCtx->rdi; break;
13640 case 8: u64EffAddr = pCtx->r8; break;
13641 case 9: u64EffAddr = pCtx->r9; break;
13642 case 10: u64EffAddr = pCtx->r10; break;
13643 case 11: u64EffAddr = pCtx->r11; break;
13644 case 13: u64EffAddr = pCtx->r13; break;
13645 case 14: u64EffAddr = pCtx->r14; break;
13646 case 15: u64EffAddr = pCtx->r15; break;
13647 /* SIB */
13648 case 4:
13649 case 12:
13650 {
13651 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13652
13653 /* Get the index and scale it. */
13654 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13655 {
13656 case 0: u64EffAddr = pCtx->rax; break;
13657 case 1: u64EffAddr = pCtx->rcx; break;
13658 case 2: u64EffAddr = pCtx->rdx; break;
13659 case 3: u64EffAddr = pCtx->rbx; break;
13660 case 4: u64EffAddr = 0; /*none */ break;
13661 case 5: u64EffAddr = pCtx->rbp; break;
13662 case 6: u64EffAddr = pCtx->rsi; break;
13663 case 7: u64EffAddr = pCtx->rdi; break;
13664 case 8: u64EffAddr = pCtx->r8; break;
13665 case 9: u64EffAddr = pCtx->r9; break;
13666 case 10: u64EffAddr = pCtx->r10; break;
13667 case 11: u64EffAddr = pCtx->r11; break;
13668 case 12: u64EffAddr = pCtx->r12; break;
13669 case 13: u64EffAddr = pCtx->r13; break;
13670 case 14: u64EffAddr = pCtx->r14; break;
13671 case 15: u64EffAddr = pCtx->r15; break;
13672 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13673 }
13674 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13675
13676 /* add base */
13677 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13678 {
13679 case 0: u64EffAddr += pCtx->rax; break;
13680 case 1: u64EffAddr += pCtx->rcx; break;
13681 case 2: u64EffAddr += pCtx->rdx; break;
13682 case 3: u64EffAddr += pCtx->rbx; break;
13683 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13684 case 6: u64EffAddr += pCtx->rsi; break;
13685 case 7: u64EffAddr += pCtx->rdi; break;
13686 case 8: u64EffAddr += pCtx->r8; break;
13687 case 9: u64EffAddr += pCtx->r9; break;
13688 case 10: u64EffAddr += pCtx->r10; break;
13689 case 11: u64EffAddr += pCtx->r11; break;
13690 case 12: u64EffAddr += pCtx->r12; break;
13691 case 14: u64EffAddr += pCtx->r14; break;
13692 case 15: u64EffAddr += pCtx->r15; break;
13693 /* complicated encodings */
13694 case 5:
13695 case 13:
13696 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13697 {
13698 if (!pVCpu->iem.s.uRexB)
13699 {
13700 u64EffAddr += pCtx->rbp;
13701 SET_SS_DEF();
13702 }
13703 else
13704 u64EffAddr += pCtx->r13;
13705 }
13706 else
13707 {
13708 uint32_t u32Disp;
13709 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13710 u64EffAddr += (int32_t)u32Disp;
13711 }
13712 break;
13713 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13714 }
13715 break;
13716 }
13717 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13718 }
13719
13720 /* Get and add the displacement. */
13721 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13722 {
13723 case 0:
13724 break;
13725 case 1:
13726 {
13727 int8_t i8Disp;
13728 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13729 u64EffAddr += i8Disp;
13730 break;
13731 }
13732 case 2:
13733 {
13734 uint32_t u32Disp;
13735 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13736 u64EffAddr += (int32_t)u32Disp;
13737 break;
13738 }
13739 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13740 }
13741
13742 }
13743
13744 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13745 {
13746 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13747 return u64EffAddr;
13748 }
13749 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13750 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13751 return u64EffAddr & UINT32_MAX;
13752}
13753#endif /* IEM_WITH_SETJMP */
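/* A note on error reporting (a reading of the code above, not new behaviour):
 * the non-setjmp iemOpHlpCalcRmEffAddr variant reports impossible encodings
 * through VBox status codes, whereas this variant presumably returns
 * RTGCPTR_MAX via IEM_NOT_REACHED_DEFAULT_CASE_RET2 or longjmps through
 * pVCpu->iem.s.pJmpBuf (the AssertFailedStmt(longjmp(...)) paths), so callers
 * are expected to invoke it from within a setjmp frame like the one set up in
 * iemExecOneInner further down. */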
13754
13755
13756/** @} */
13757
13758
13759
13760/*
13761 * Include the instructions
13762 */
13763#include "IEMAllInstructions.cpp.h"
13764
13765
13766
13767
13768#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13769
13770/**
13771 * Sets up execution verification mode.
13772 */
13773IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13774{
13776 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13777
13778 /*
13779 * Always note down the address of the current instruction.
13780 */
13781 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13782 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13783
13784 /*
13785 * Enable verification and/or logging.
13786 */
13787 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
13788 if ( fNewNoRem
13789 && ( 0
13790#if 0 /* auto enable on first paged protected mode interrupt */
13791 || ( pOrgCtx->eflags.Bits.u1IF
13792 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13793 && TRPMHasTrap(pVCpu)
13794 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13795#endif
13796#if 0
13797 || ( pOrgCtx->cs.Sel == 0x10
13798 && ( pOrgCtx->rip == 0x90119e3e
13799 || pOrgCtx->rip == 0x901d9810))
13800#endif
13801#if 0 /* Auto enable DSL - FPU stuff. */
13802 || ( pOrgCtx->cs.Sel == 0x10
13803 && (// pOrgCtx->rip == 0xc02ec07f
13804 //|| pOrgCtx->rip == 0xc02ec082
13805 //|| pOrgCtx->rip == 0xc02ec0c9
13806 0
13807 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13808#endif
13809#if 0 /* Auto enable DSL - fstp st0 stuff. */
13810 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13811#endif
13812#if 0
13813 || pOrgCtx->rip == 0x9022bb3a
13814#endif
13815#if 0
13816 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13817#endif
13818#if 0
13819 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13820 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13821#endif
13822#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
13823 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13824 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13825 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13826#endif
13827#if 0 /* NT4SP1 - xadd early boot. */
13828 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13829#endif
13830#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13831 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13832#endif
13833#if 0 /* NT4SP1 - cmpxchg (AMD). */
13834 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13835#endif
13836#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13837 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13838#endif
13839#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13840 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13841
13842#endif
13843#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13844 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13845
13846#endif
13847#if 0 /* NT4SP1 - frstor [ecx] */
13848 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13849#endif
13850#if 0 /* xxxxxx - All long mode code. */
13851 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13852#endif
13853#if 0 /* rep movsq linux 3.7 64-bit boot. */
13854 || (pOrgCtx->rip == 0x0000000000100241)
13855#endif
13856#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13857 || (pOrgCtx->rip == 0x000000000215e240)
13858#endif
13859#if 0 /* DOS's size-overridden iret to v8086. */
13860 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13861#endif
13862 )
13863 )
13864 {
13865 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13866 RTLogFlags(NULL, "enabled");
13867 fNewNoRem = false;
13868 }
13869 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13870 {
13871 pVCpu->iem.s.fNoRem = fNewNoRem;
13872 if (!fNewNoRem)
13873 {
13874 LogAlways(("Enabling verification mode!\n"));
13875 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13876 }
13877 else
13878 LogAlways(("Disabling verification mode!\n"));
13879 }
13880
13881 /*
13882 * Switch state.
13883 */
13884 if (IEM_VERIFICATION_ENABLED(pVCpu))
13885 {
13886 static CPUMCTX s_DebugCtx; /* Ugly! */
13887
13888 s_DebugCtx = *pOrgCtx;
13889 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13890 }
13891
13892 /*
13893 * See if there is an interrupt pending in TRPM and inject it if we can.
13894 */
13895 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13896 if ( pOrgCtx->eflags.Bits.u1IF
13897 && TRPMHasTrap(pVCpu)
13898 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13899 {
13900 uint8_t u8TrapNo;
13901 TRPMEVENT enmType;
13902 RTGCUINT uErrCode;
13903 RTGCPTR uCr2;
13904 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13905 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13906 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13907 TRPMResetTrap(pVCpu);
13908 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13909 }
13910
13911 /*
13912 * Reset the counters.
13913 */
13914 pVCpu->iem.s.cIOReads = 0;
13915 pVCpu->iem.s.cIOWrites = 0;
13916 pVCpu->iem.s.fIgnoreRaxRdx = false;
13917 pVCpu->iem.s.fOverlappingMovs = false;
13918 pVCpu->iem.s.fProblematicMemory = false;
13919 pVCpu->iem.s.fUndefinedEFlags = 0;
13920
13921 if (IEM_VERIFICATION_ENABLED(pVCpu))
13922 {
13923 /*
13924 * Free all verification records.
13925 */
13926 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13927 pVCpu->iem.s.pIemEvtRecHead = NULL;
13928 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
13929 do
13930 {
13931 while (pEvtRec)
13932 {
13933 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13934 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13935 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13936 pEvtRec = pNext;
13937 }
13938 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13939 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13940 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13941 } while (pEvtRec);
13942 }
13943}
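/* When verification is active the guest context is snapshotted into the
 * static s_DebugCtx above and IEM executes against that copy, which lets
 * iemExecVerificationModeCheck() later replay the instruction in REM or HM
 * against the untouched original context and diff the two states. */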
13944
13945
13946/**
13947 * Allocate an event record.
13948 * @returns Pointer to a record, or NULL if verification is disabled or no record can be allocated.
13949 */
13950IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
13951{
13952 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13953 return NULL;
13954
13955 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
13956 if (pEvtRec)
13957 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
13958 else
13959 {
13960 if (!pVCpu->iem.s.ppIemEvtRecNext)
13961 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
13962
13963 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
13964 if (!pEvtRec)
13965 return NULL;
13966 }
13967 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
13968 pEvtRec->pNext = NULL;
13969 return pEvtRec;
13970}
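/* Event records are recycled through the singly linked pFreeEvtRec list and
 * only MMR3HeapAlloc'ed when that list is empty; a NULL return (verification
 * disabled, or a notification arriving before ring-3 init has set up
 * ppIemEvtRecNext) must be tolerated by every caller, as the notification
 * hooks below do. */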
13971
13972
13973/**
13974 * IOMMMIORead notification.
13975 */
13976VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
13977{
13978 PVMCPU pVCpu = VMMGetCpu(pVM);
13979 if (!pVCpu)
13980 return;
13981 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13982 if (!pEvtRec)
13983 return;
13984 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
13985 pEvtRec->u.RamRead.GCPhys = GCPhys;
13986 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
13987 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13988 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13989}
13990
13991
13992/**
13993 * IOMMMIOWrite notification.
13994 */
13995VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
13996{
13997 PVMCPU pVCpu = VMMGetCpu(pVM);
13998 if (!pVCpu)
13999 return;
14000 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14001 if (!pEvtRec)
14002 return;
14003 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
14004 pEvtRec->u.RamWrite.GCPhys = GCPhys;
14005 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
14006 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
14007 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
14008 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
14009 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
14010 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14011 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14012}
14013
14014
14015/**
14016 * IOMIOPortRead notification.
14017 */
14018VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
14019{
14020 PVMCPU pVCpu = VMMGetCpu(pVM);
14021 if (!pVCpu)
14022 return;
14023 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14024 if (!pEvtRec)
14025 return;
14026 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14027 pEvtRec->u.IOPortRead.Port = Port;
14028 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14029 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14030 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14031}
14032
14033/**
14034 * IOMIOPortWrite notification.
14035 */
14036VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14037{
14038 PVMCPU pVCpu = VMMGetCpu(pVM);
14039 if (!pVCpu)
14040 return;
14041 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14042 if (!pEvtRec)
14043 return;
14044 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14045 pEvtRec->u.IOPortWrite.Port = Port;
14046 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14047 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14048 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14049 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14050}
14051
14052
14053VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
14054{
14055 PVMCPU pVCpu = VMMGetCpu(pVM);
14056 if (!pVCpu)
14057 return;
14058 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14059 if (!pEvtRec)
14060 return;
14061 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
14062 pEvtRec->u.IOPortStrRead.Port = Port;
14063 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
14064 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
14065 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14066 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14067}
14068
14069
14070VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
14071{
14072 PVMCPU pVCpu = VMMGetCpu(pVM);
14073 if (!pVCpu)
14074 return;
14075 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14076 if (!pEvtRec)
14077 return;
14078 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
14079 pEvtRec->u.IOPortStrWrite.Port = Port;
14080 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
14081 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
14082 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
14083 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
14084}
14085
14086
14087/**
14088 * Fakes and records an I/O port read.
14089 *
14090 * @returns VINF_SUCCESS.
14091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14092 * @param Port The I/O port.
14093 * @param pu32Value Where to store the fake value.
14094 * @param cbValue The size of the access.
14095 */
14096IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14097{
14098 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14099 if (pEvtRec)
14100 {
14101 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
14102 pEvtRec->u.IOPortRead.Port = Port;
14103 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
14104 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14105 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14106 }
14107 pVCpu->iem.s.cIOReads++;
14108 *pu32Value = 0xcccccccc;
14109 return VINF_SUCCESS;
14110}
14111
14112
14113/**
14114 * Fakes and records an I/O port write.
14115 *
14116 * @returns VINF_SUCCESS.
14117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14118 * @param Port The I/O port.
14119 * @param u32Value The value being written.
14120 * @param cbValue The size of the access.
14121 */
14122IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14123{
14124 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
14125 if (pEvtRec)
14126 {
14127 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
14128 pEvtRec->u.IOPortWrite.Port = Port;
14129 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
14130 pEvtRec->u.IOPortWrite.u32Value = u32Value;
14131 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
14132 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
14133 }
14134 pVCpu->iem.s.cIOWrites++;
14135 return VINF_SUCCESS;
14136}
14137
14138
14139/**
14140 * Used to add extra details about a stub case.
14141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14142 */
14143IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
14144{
14145 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14146 PVM pVM = pVCpu->CTX_SUFF(pVM);
14148 char szRegs[4096];
14149 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
14150 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
14151 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
14152 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
14153 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
14154 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
14155 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
14156 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
14157 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
14158 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
14159 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
14160 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
14161 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
14162 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
14163 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
14164 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
14165 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
14166 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
14167 " efer=%016VR{efer}\n"
14168 " pat=%016VR{pat}\n"
14169 " sf_mask=%016VR{sf_mask}\n"
14170 "krnl_gs_base=%016VR{krnl_gs_base}\n"
14171 " lstar=%016VR{lstar}\n"
14172 " star=%016VR{star} cstar=%016VR{cstar}\n"
14173 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
14174 );
14175
14176 char szInstr1[256];
14177 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
14178 DBGF_DISAS_FLAGS_DEFAULT_MODE,
14179 szInstr1, sizeof(szInstr1), NULL);
14180 char szInstr2[256];
14181 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
14182 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14183 szInstr2, sizeof(szInstr2), NULL);
14184
14185 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
14186}
14187
14188
14189/**
14190 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
14191 * dump to the assertion info.
14192 *
14193 * @param pEvtRec The record to dump.
14194 */
14195IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
14196{
14197 switch (pEvtRec->enmEvent)
14198 {
14199 case IEMVERIFYEVENT_IOPORT_READ:
14200 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
14201 pEvtRec->u.IOPortRead.Port,
14202 pEvtRec->u.IOPortRead.cbValue);
14203 break;
14204 case IEMVERIFYEVENT_IOPORT_WRITE:
14205 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
14206 pEvtRec->u.IOPortWrite.Port,
14207 pEvtRec->u.IOPortWrite.cbValue,
14208 pEvtRec->u.IOPortWrite.u32Value);
14209 break;
14210 case IEMVERIFYEVENT_IOPORT_STR_READ:
14211 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
14212 pEvtRec->u.IOPortStrRead.Port,
14213 pEvtRec->u.IOPortStrRead.cbValue,
14214 pEvtRec->u.IOPortStrRead.cTransfers);
14215 break;
14216 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14217 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
14218 pEvtRec->u.IOPortStrWrite.Port,
14219 pEvtRec->u.IOPortStrWrite.cbValue,
14220 pEvtRec->u.IOPortStrWrite.cTransfers);
14221 break;
14222 case IEMVERIFYEVENT_RAM_READ:
14223 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
14224 pEvtRec->u.RamRead.GCPhys,
14225 pEvtRec->u.RamRead.cb);
14226 break;
14227 case IEMVERIFYEVENT_RAM_WRITE:
14228 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
14229 pEvtRec->u.RamWrite.GCPhys,
14230 pEvtRec->u.RamWrite.cb,
14231 (int)pEvtRec->u.RamWrite.cb,
14232 pEvtRec->u.RamWrite.ab);
14233 break;
14234 default:
14235 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
14236 break;
14237 }
14238}
14239
14240
14241/**
14242 * Raises an assertion on the specified records, showing the given message with
14243 * dumps of both records attached.
14244 *
14245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14246 * @param pEvtRec1 The first record.
14247 * @param pEvtRec2 The second record.
14248 * @param pszMsg The message explaining why we're asserting.
14249 */
14250IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
14251{
14252 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14253 iemVerifyAssertAddRecordDump(pEvtRec1);
14254 iemVerifyAssertAddRecordDump(pEvtRec2);
14255 iemVerifyAssertMsg2(pVCpu);
14256 RTAssertPanic();
14257}
14258
14259
14260/**
14261 * Raises an assertion on the specified record, showing the given message with
14262 * a record dump attached.
14263 *
14264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14265 * @param pEvtRec The record to dump.
14266 * @param pszMsg The message explaining why we're asserting.
14267 */
14268IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
14269{
14270 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14271 iemVerifyAssertAddRecordDump(pEvtRec);
14272 iemVerifyAssertMsg2(pVCpu);
14273 RTAssertPanic();
14274}
14275
14276
14277/**
14278 * Verifies a write record.
14279 *
14280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14281 * @param pEvtRec The write record.
14282 * @param fRem Set if REM was doing the other execution. If clear
14283 * it was HM.
14284 */
14285IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
14286{
14287 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
14288 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
14289 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
14290 if ( RT_FAILURE(rc)
14291 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
14292 {
14293 /* fend off ins */
14294 if ( !pVCpu->iem.s.cIOReads
14295 || pEvtRec->u.RamWrite.ab[0] != 0xcc
14296 || ( pEvtRec->u.RamWrite.cb != 1
14297 && pEvtRec->u.RamWrite.cb != 2
14298 && pEvtRec->u.RamWrite.cb != 4) )
14299 {
14300 /* fend off ROMs and MMIO */
14301 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
14302 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
14303 {
14304 /* fend off fxsave */
14305 if (pEvtRec->u.RamWrite.cb != 512)
14306 {
14307 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
14308 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14309 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
14310 RTAssertMsg2Add("%s: %.*Rhxs\n"
14311 "iem: %.*Rhxs\n",
14312 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
14313 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
14314 iemVerifyAssertAddRecordDump(pEvtRec);
14315 iemVerifyAssertMsg2(pVCpu);
14316 RTAssertPanic();
14317 }
14318 }
14319 }
14320 }
14321
14322}
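/* The filtering above deliberately ignores three classes of expected
 * differences before asserting: INS-style writes of the 0xcc fake I/O fill
 * value (1, 2 or 4 bytes), writes into the VGA/ROM window at 0xa0000-0xfffff
 * and the flash/ROM area from 0xfffc0000 up, and 512-byte FXSAVE images. */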
14323
14324/**
14325 * Performs the post-execution verification checks.
14326 */
14327IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
14328{
14329 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14330 return rcStrictIem;
14331
14332 /*
14333 * Switch back the state.
14334 */
14335 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
14336 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
14337 Assert(pOrgCtx != pDebugCtx);
14338 IEM_GET_CTX(pVCpu) = pOrgCtx;
14339
14340 /*
14341 * Execute the instruction in REM.
14342 */
14343 bool fRem = false;
14344 PVM pVM = pVCpu->CTX_SUFF(pVM);
14346 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
14347#ifdef IEM_VERIFICATION_MODE_FULL_HM
14348 if ( HMIsEnabled(pVM)
14349 && pVCpu->iem.s.cIOReads == 0
14350 && pVCpu->iem.s.cIOWrites == 0
14351 && !pVCpu->iem.s.fProblematicMemory)
14352 {
14353 uint64_t uStartRip = pOrgCtx->rip;
14354 unsigned iLoops = 0;
14355 do
14356 {
14357 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
14358 iLoops++;
14359 } while ( rc == VINF_SUCCESS
14360 || ( rc == VINF_EM_DBG_STEPPED
14361 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14362 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
14363 || ( pOrgCtx->rip != pDebugCtx->rip
14364 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
14365 && iLoops < 8) );
14366 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
14367 rc = VINF_SUCCESS;
14368 }
14369#endif
14370 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
14371 || rc == VINF_IOM_R3_IOPORT_READ
14372 || rc == VINF_IOM_R3_IOPORT_WRITE
14373 || rc == VINF_IOM_R3_MMIO_READ
14374 || rc == VINF_IOM_R3_MMIO_READ_WRITE
14375 || rc == VINF_IOM_R3_MMIO_WRITE
14376 || rc == VINF_CPUM_R3_MSR_READ
14377 || rc == VINF_CPUM_R3_MSR_WRITE
14378 || rc == VINF_EM_RESCHEDULE
14379 )
14380 {
14381 EMRemLock(pVM);
14382 rc = REMR3EmulateInstruction(pVM, pVCpu);
14383 AssertRC(rc);
14384 EMRemUnlock(pVM);
14385 fRem = true;
14386 }
14387
14388# if 1 /* Skip unimplemented instructions for now. */
14389 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14390 {
14391 IEM_GET_CTX(pVCpu) = pOrgCtx;
14392 if (rc == VINF_EM_DBG_STEPPED)
14393 return VINF_SUCCESS;
14394 return rc;
14395 }
14396# endif
14397
14398 /*
14399 * Compare the register states.
14400 */
14401 unsigned cDiffs = 0;
14402 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
14403 {
14404 //Log(("REM and IEM ends up with different registers!\n"));
14405 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
14406
14407# define CHECK_FIELD(a_Field) \
14408 do \
14409 { \
14410 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14411 { \
14412 switch (sizeof(pOrgCtx->a_Field)) \
14413 { \
14414 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14415 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14416 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14417 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14418 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14419 } \
14420 cDiffs++; \
14421 } \
14422 } while (0)
14423# define CHECK_XSTATE_FIELD(a_Field) \
14424 do \
14425 { \
14426 if (pOrgXState->a_Field != pDebugXState->a_Field) \
14427 { \
14428 switch (sizeof(pOrgXState->a_Field)) \
14429 { \
14430 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14431 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14432 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14433 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14434 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14435 } \
14436 cDiffs++; \
14437 } \
14438 } while (0)
14439
14440# define CHECK_BIT_FIELD(a_Field) \
14441 do \
14442 { \
14443 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14444 { \
14445 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
14446 cDiffs++; \
14447 } \
14448 } while (0)
14449
14450# define CHECK_SEL(a_Sel) \
14451 do \
14452 { \
14453 CHECK_FIELD(a_Sel.Sel); \
14454 CHECK_FIELD(a_Sel.Attr.u); \
14455 CHECK_FIELD(a_Sel.u64Base); \
14456 CHECK_FIELD(a_Sel.u32Limit); \
14457 CHECK_FIELD(a_Sel.fFlags); \
14458 } while (0)
14459
14460 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
14461 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
14462
14463#if 1 /* The recompiler doesn't update these the intel way. */
14464 if (fRem)
14465 {
14466 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
14467 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
14468 pOrgXState->x87.CS = pDebugXState->x87.CS;
14469 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
14470 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
14471 pOrgXState->x87.DS = pDebugXState->x87.DS;
14472 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
14473 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
14474 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
14475 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
14476 }
14477#endif
14478 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
14479 {
14480 RTAssertMsg2Weak(" the FPU state differs\n");
14481 cDiffs++;
14482 CHECK_XSTATE_FIELD(x87.FCW);
14483 CHECK_XSTATE_FIELD(x87.FSW);
14484 CHECK_XSTATE_FIELD(x87.FTW);
14485 CHECK_XSTATE_FIELD(x87.FOP);
14486 CHECK_XSTATE_FIELD(x87.FPUIP);
14487 CHECK_XSTATE_FIELD(x87.CS);
14488 CHECK_XSTATE_FIELD(x87.Rsrvd1);
14489 CHECK_XSTATE_FIELD(x87.FPUDP);
14490 CHECK_XSTATE_FIELD(x87.DS);
14491 CHECK_XSTATE_FIELD(x87.Rsrvd2);
14492 CHECK_XSTATE_FIELD(x87.MXCSR);
14493 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
14494 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
14495 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
14496 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
14497 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
14498 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
14499 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
14500 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
14501 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
14502 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
14503 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
14504 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
14505 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
14506 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
14507 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
14508 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
14509 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
14510 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
14511 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
14512 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
14513 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
14514 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
14515 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
14516 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
14517 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
14518 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
14519 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
14520 }
14521 CHECK_FIELD(rip);
14522 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
14523 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
14524 {
14525 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
14526 CHECK_BIT_FIELD(rflags.Bits.u1CF);
14527 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
14528 CHECK_BIT_FIELD(rflags.Bits.u1PF);
14529 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
14530 CHECK_BIT_FIELD(rflags.Bits.u1AF);
14531 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
14532 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
14533 CHECK_BIT_FIELD(rflags.Bits.u1SF);
14534 CHECK_BIT_FIELD(rflags.Bits.u1TF);
14535 CHECK_BIT_FIELD(rflags.Bits.u1IF);
14536 CHECK_BIT_FIELD(rflags.Bits.u1DF);
14537 CHECK_BIT_FIELD(rflags.Bits.u1OF);
14538 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
14539 CHECK_BIT_FIELD(rflags.Bits.u1NT);
14540 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
14541 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
14542 CHECK_BIT_FIELD(rflags.Bits.u1RF);
14543 CHECK_BIT_FIELD(rflags.Bits.u1VM);
14544 CHECK_BIT_FIELD(rflags.Bits.u1AC);
14545 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
14546 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
14547 CHECK_BIT_FIELD(rflags.Bits.u1ID);
14548 }
14549
14550 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
14551 CHECK_FIELD(rax);
14552 CHECK_FIELD(rcx);
14553 if (!pVCpu->iem.s.fIgnoreRaxRdx)
14554 CHECK_FIELD(rdx);
14555 CHECK_FIELD(rbx);
14556 CHECK_FIELD(rsp);
14557 CHECK_FIELD(rbp);
14558 CHECK_FIELD(rsi);
14559 CHECK_FIELD(rdi);
14560 CHECK_FIELD(r8);
14561 CHECK_FIELD(r9);
14562 CHECK_FIELD(r10);
14563 CHECK_FIELD(r11);
14564 CHECK_FIELD(r12);
14565 CHECK_FIELD(r13);
14566 CHECK_SEL(cs);
14567 CHECK_SEL(ss);
14568 CHECK_SEL(ds);
14569 CHECK_SEL(es);
14570 CHECK_SEL(fs);
14571 CHECK_SEL(gs);
14572 CHECK_FIELD(cr0);
14573
14574 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
14575 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
14576 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
14577 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
14578 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14579 {
14580 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14581 { /* ignore */ }
14582 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14583 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14584 && fRem)
14585 { /* ignore */ }
14586 else
14587 CHECK_FIELD(cr2);
14588 }
14589 CHECK_FIELD(cr3);
14590 CHECK_FIELD(cr4);
14591 CHECK_FIELD(dr[0]);
14592 CHECK_FIELD(dr[1]);
14593 CHECK_FIELD(dr[2]);
14594 CHECK_FIELD(dr[3]);
14595 CHECK_FIELD(dr[6]);
14596 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14597 CHECK_FIELD(dr[7]);
14598 CHECK_FIELD(gdtr.cbGdt);
14599 CHECK_FIELD(gdtr.pGdt);
14600 CHECK_FIELD(idtr.cbIdt);
14601 CHECK_FIELD(idtr.pIdt);
14602 CHECK_SEL(ldtr);
14603 CHECK_SEL(tr);
14604 CHECK_FIELD(SysEnter.cs);
14605 CHECK_FIELD(SysEnter.eip);
14606 CHECK_FIELD(SysEnter.esp);
14607 CHECK_FIELD(msrEFER);
14608 CHECK_FIELD(msrSTAR);
14609 CHECK_FIELD(msrPAT);
14610 CHECK_FIELD(msrLSTAR);
14611 CHECK_FIELD(msrCSTAR);
14612 CHECK_FIELD(msrSFMASK);
14613 CHECK_FIELD(msrKERNELGSBASE);
14614
14615 if (cDiffs != 0)
14616 {
14617 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14618 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14619 RTAssertPanic();
14620 static bool volatile s_fEnterDebugger = true;
14621 if (s_fEnterDebugger)
14622 DBGFSTOP(pVM);
14623
14624# if 1 /* Ignore unimplemented instructions for now. */
14625 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14626 rcStrictIem = VINF_SUCCESS;
14627# endif
14628 }
14629# undef CHECK_FIELD
14630# undef CHECK_BIT_FIELD
14631 }
14632
14633 /*
14634 * If the register state compared fine, check the verification event
14635 * records.
14636 */
14637 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14638 {
14639 /*
14640 * Compare verification event records.
14641 * - I/O port accesses should be a 1:1 match.
14642 */
14643 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14644 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14645 while (pIemRec && pOtherRec)
14646 {
14647 /* Since we might miss RAM writes and reads, ignore reads and, for the
14648 extra IEM records, just check that any written memory matches what is actually in guest memory. */
14649 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14650 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14651 && pIemRec->pNext)
14652 {
14653 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14654 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14655 pIemRec = pIemRec->pNext;
14656 }
14657
14658 /* Do the compare. */
14659 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14660 {
14661 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14662 break;
14663 }
14664 bool fEquals;
14665 switch (pIemRec->enmEvent)
14666 {
14667 case IEMVERIFYEVENT_IOPORT_READ:
14668 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14669 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14670 break;
14671 case IEMVERIFYEVENT_IOPORT_WRITE:
14672 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14673 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14674 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14675 break;
14676 case IEMVERIFYEVENT_IOPORT_STR_READ:
14677 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14678 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14679 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14680 break;
14681 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14682 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14683 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14684 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14685 break;
14686 case IEMVERIFYEVENT_RAM_READ:
14687 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14688 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14689 break;
14690 case IEMVERIFYEVENT_RAM_WRITE:
14691 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14692 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14693 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14694 break;
14695 default:
14696 fEquals = false;
14697 break;
14698 }
14699 if (!fEquals)
14700 {
14701 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14702 break;
14703 }
14704
14705 /* advance */
14706 pIemRec = pIemRec->pNext;
14707 pOtherRec = pOtherRec->pNext;
14708 }
14709
14710 /* Ignore extra writes and reads. */
14711 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14712 {
14713 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14714 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14715 pIemRec = pIemRec->pNext;
14716 }
14717 if (pIemRec != NULL)
14718 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14719 else if (pOtherRec != NULL)
14720 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14721 }
14722 IEM_GET_CTX(pVCpu) = pOrgCtx;
14723
14724 return rcStrictIem;
14725}
14726
14727#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14728
14729/* stubs */
14730IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14731{
14732 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14733 return VERR_INTERNAL_ERROR;
14734}
14735
14736IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14737{
14738 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14739 return VERR_INTERNAL_ERROR;
14740}
14741
14742#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14743
14744
14745#ifdef LOG_ENABLED
14746/**
14747 * Logs the current instruction.
14748 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14749 * @param pCtx The current CPU context.
14750 * @param fSameCtx Set if we have the same context information as the VMM,
14751 * clear if we may have already executed an instruction in
14752 * our debug context. When clear, we assume IEMCPU holds
14753 * valid CPU mode info.
14754 */
14755IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14756{
14757# ifdef IN_RING3
14758 if (LogIs2Enabled())
14759 {
14760 char szInstr[256];
14761 uint32_t cbInstr = 0;
14762 if (fSameCtx)
14763 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14764 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14765 szInstr, sizeof(szInstr), &cbInstr);
14766 else
14767 {
14768 uint32_t fFlags = 0;
14769 switch (pVCpu->iem.s.enmCpuMode)
14770 {
14771 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14772 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14773 case IEMMODE_16BIT:
14774 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14775 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14776 else
14777 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14778 break;
14779 }
14780 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14781 szInstr, sizeof(szInstr), &cbInstr);
14782 }
14783
14784 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14785 Log2(("****\n"
14786 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14787 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14788 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14789 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14790 " %s\n"
14791 ,
14792 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14793 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14794 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14795 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14796 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14797 szInstr));
14798
14799 if (LogIs3Enabled())
14800 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14801 }
14802 else
14803# endif
14804 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14805 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14806 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14807}
14808#endif
14809
14810
14811/**
14812 * Makes status code adjustments (pass up from I/O and access handlers)
14813 * as well as maintaining statistics.
14814 *
14815 * @returns Strict VBox status code to pass up.
14816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14817 * @param rcStrict The status from executing an instruction.
14818 */
14819DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14820{
14821 if (rcStrict != VINF_SUCCESS)
14822 {
14823 if (RT_SUCCESS(rcStrict))
14824 {
14825 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14826 || rcStrict == VINF_IOM_R3_IOPORT_READ
14827 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14828 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14829 || rcStrict == VINF_IOM_R3_MMIO_READ
14830 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14831 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14832 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14833 || rcStrict == VINF_CPUM_R3_MSR_READ
14834 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14835 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14836 || rcStrict == VINF_EM_RAW_TO_R3
14837 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14838 /* raw-mode / virt handlers only: */
14839 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14840 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14841 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14842 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14843 || rcStrict == VINF_SELM_SYNC_GDT
14844 || rcStrict == VINF_CSAM_PENDING_ACTION
14845 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14846 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14847/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
14848 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
14849 if (rcPassUp == VINF_SUCCESS)
14850 pVCpu->iem.s.cRetInfStatuses++;
14851 else if ( rcPassUp < VINF_EM_FIRST
14852 || rcPassUp > VINF_EM_LAST
14853 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14854 {
14855 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14856 pVCpu->iem.s.cRetPassUpStatus++;
14857 rcStrict = rcPassUp;
14858 }
14859 else
14860 {
14861 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14862 pVCpu->iem.s.cRetInfStatuses++;
14863 }
14864 }
14865 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14866 pVCpu->iem.s.cRetAspectNotImplemented++;
14867 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14868 pVCpu->iem.s.cRetInstrNotImplemented++;
14869#ifdef IEM_VERIFICATION_MODE_FULL
14870 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14871 rcStrict = VINF_SUCCESS;
14872#endif
14873 else
14874 pVCpu->iem.s.cRetErrStatuses++;
14875 }
14876 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14877 {
14878 pVCpu->iem.s.cRetPassUpStatus++;
14879 rcStrict = pVCpu->iem.s.rcPassUp;
14880 }
14881
14882 return rcStrict;
14883}
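/* Pass-up priority (describing the logic above): a pending rcPassUp replaces
 * an informational rcStrict only when it falls outside the
 * VINF_EM_FIRST..VINF_EM_LAST window or has a numerically lower value than
 * rcStrict; otherwise the original status is kept and only the
 * cRetInfStatuses counter is bumped. */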
14884
14885
14886/**
14887 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14888 * IEMExecOneWithPrefetchedByPC.
14889 *
14890 * Similar code is found in IEMExecLots.
14891 *
14892 * @return Strict VBox status code.
14893 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14895 * @param fExecuteInhibit If set, execute the instruction following CLI,
14896 * POP SS and MOV SS,GR.
14897 */
14898DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14899{
14900#ifdef IEM_WITH_SETJMP
14901 VBOXSTRICTRC rcStrict;
14902 jmp_buf JmpBuf;
14903 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14904 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14905 if ((rcStrict = setjmp(JmpBuf)) == 0)
14906 {
14907 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14908 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14909 }
14910 else
14911 pVCpu->iem.s.cLongJumps++;
14912 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14913#else
14914 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14915 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14916#endif
14917 if (rcStrict == VINF_SUCCESS)
14918 pVCpu->iem.s.cInstructions++;
14919 if (pVCpu->iem.s.cActiveMappings > 0)
14920 {
14921 Assert(rcStrict != VINF_SUCCESS);
14922 iemMemRollback(pVCpu);
14923 }
14924//#ifdef DEBUG
14925// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14926//#endif
14927
14928 /* Execute the next instruction as well if a cli, pop ss or
14929 mov ss, Gr has just completed successfully. */
14930 if ( fExecuteInhibit
14931 && rcStrict == VINF_SUCCESS
14932 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14933 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
14934 {
14935 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14936 if (rcStrict == VINF_SUCCESS)
14937 {
14938#ifdef LOG_ENABLED
14939 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14940#endif
14941#ifdef IEM_WITH_SETJMP
14942 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14943 if ((rcStrict = setjmp(JmpBuf)) == 0)
14944 {
14945 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14946 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14947 }
14948 else
14949 pVCpu->iem.s.cLongJumps++;
14950 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14951#else
14952 IEM_OPCODE_GET_NEXT_U8(&b);
14953 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14954#endif
14955 if (rcStrict == VINF_SUCCESS)
14956 pVCpu->iem.s.cInstructions++;
14957 if (pVCpu->iem.s.cActiveMappings > 0)
14958 {
14959 Assert(rcStrict != VINF_SUCCESS);
14960 iemMemRollback(pVCpu);
14961 }
14962 }
14963 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14964 }
14965
14966 /*
14967 * Return value fiddling, statistics and sanity assertions.
14968 */
14969 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14970
14971 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14972 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14973#if defined(IEM_VERIFICATION_MODE_FULL)
14974 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14975 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14976 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14977 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14978#endif
14979 return rcStrict;
14980}
14981
14982
14983#ifdef IN_RC
14984/**
14985 * Re-enters raw-mode or ensures we return to ring-3.
14986 *
14987 * @returns rcStrict, maybe modified.
14988 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14989 * @param pCtx The current CPU context.
14990 * @param rcStrict The status code returned by the interpreter.
14991 */
14992DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
14993{
14994 if ( !pVCpu->iem.s.fInPatchCode
14995 && ( rcStrict == VINF_SUCCESS
14996 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14997 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14998 {
14999 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
15000 CPUMRawEnter(pVCpu);
15001 else
15002 {
15003 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
15004 rcStrict = VINF_EM_RESCHEDULE;
15005 }
15006 }
15007 return rcStrict;
15008}
15009#endif
15010
15011
15012/**
15013 * Execute one instruction.
15014 *
15015 * @return Strict VBox status code.
15016 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15017 */
15018VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
15019{
15020#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15021 if (++pVCpu->iem.s.cVerifyDepth == 1)
15022 iemExecVerificationModeSetup(pVCpu);
15023#endif
15024#ifdef LOG_ENABLED
15025 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15026 iemLogCurInstr(pVCpu, pCtx, true);
15027#endif
15028
15029 /*
15030 * Do the decoding and emulation.
15031 */
15032 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15033 if (rcStrict == VINF_SUCCESS)
15034 rcStrict = iemExecOneInner(pVCpu, true);
15035
15036#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15037 /*
15038 * Assert some sanity.
15039 */
15040 if (pVCpu->iem.s.cVerifyDepth == 1)
15041 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15042 pVCpu->iem.s.cVerifyDepth--;
15043#endif
15044#ifdef IN_RC
15045 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15046#endif
15047 if (rcStrict != VINF_SUCCESS)
15048 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15049 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15050 return rcStrict;
15051}
15052
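/*
 * Illustrative sketch only (not built, not part of the VirtualBox sources): shows how
 * a caller such as EM might drive IEMExecOne and fall back when IEM cannot handle the
 * current instruction.  The helper name below is made up for the example.
 */
#if 0
static VBOXSTRICTRC emR3ExampleInterpretOne(PVMCPU pVCpu)
{
    /* Interpret exactly one guest instruction at the current CS:RIP. */
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (   rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED
        || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
    {
        /* IEM could not handle the instruction; a real caller would pick another
           emulation path or fail the VM at this point. */
        Log(("emR3ExampleInterpretOne: IEM cannot handle it, rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    }
    return rcStrict;
}
#endif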
15053
15054VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15055{
15056 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15057 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15058
15059 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15060 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15061 if (rcStrict == VINF_SUCCESS)
15062 {
15063 rcStrict = iemExecOneInner(pVCpu, true);
15064 if (pcbWritten)
15065 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15066 }
15067
15068#ifdef IN_RC
15069 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15070#endif
15071 return rcStrict;
15072}
15073
15074
15075VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15076 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15077{
15078 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15079 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15080
15081 VBOXSTRICTRC rcStrict;
15082 if ( cbOpcodeBytes
15083 && pCtx->rip == OpcodeBytesPC)
15084 {
15085 iemInitDecoder(pVCpu, false);
15086#ifdef IEM_WITH_CODE_TLB
15087 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15088 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15089 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15090 pVCpu->iem.s.offCurInstrStart = 0;
15091 pVCpu->iem.s.offInstrNextByte = 0;
15092#else
15093 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15094 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15095#endif
15096 rcStrict = VINF_SUCCESS;
15097 }
15098 else
15099 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15100 if (rcStrict == VINF_SUCCESS)
15101 {
15102 rcStrict = iemExecOneInner(pVCpu, true);
15103 }
15104
15105#ifdef IN_RC
15106 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15107#endif
15108 return rcStrict;
15109}
15110
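/*
 * Illustrative sketch only (not part of the VirtualBox sources): shows how a caller
 * that has already fetched the opcode bytes (e.g. while handling an exit) might hand
 * them to IEMExecOneWithPrefetchedByPC.  The helper name and parameters are made up.
 */
#if 0
static VBOXSTRICTRC ExampleExecWithPrefetchedBytes(PVMCPU pVCpu, uint8_t const *pabOpcodes, size_t cbOpcodes)
{
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    /* The prefetched bytes are only used when they really belong to the current RIP;
       otherwise the function above falls back to the normal opcode prefetch. */
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip, pabOpcodes, cbOpcodes);
}
#endif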
15111
15112VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
15113{
15114 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15115 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15116
15117 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15118 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15119 if (rcStrict == VINF_SUCCESS)
15120 {
15121 rcStrict = iemExecOneInner(pVCpu, false);
15122 if (pcbWritten)
15123 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15124 }
15125
15126#ifdef IN_RC
15127 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15128#endif
15129 return rcStrict;
15130}
15131
15132
15133VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15134 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
15135{
15136 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15137 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15138
15139 VBOXSTRICTRC rcStrict;
15140 if ( cbOpcodeBytes
15141 && pCtx->rip == OpcodeBytesPC)
15142 {
15143 iemInitDecoder(pVCpu, true);
15144#ifdef IEM_WITH_CODE_TLB
15145 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15146 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15147 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15148 pVCpu->iem.s.offCurInstrStart = 0;
15149 pVCpu->iem.s.offInstrNextByte = 0;
15150#else
15151 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15152 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15153#endif
15154 rcStrict = VINF_SUCCESS;
15155 }
15156 else
15157 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15158 if (rcStrict == VINF_SUCCESS)
15159 rcStrict = iemExecOneInner(pVCpu, false);
15160
15161#ifdef IN_RC
15162 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15163#endif
15164 return rcStrict;
15165}
15166
15167
15168/**
15169 * For debugging DISGetParamSize, may come in handy.
15170 *
15171 * @returns Strict VBox status code.
15172 * @param pVCpu The cross context virtual CPU structure of the
15173 * calling EMT.
15174 * @param pCtxCore The context core structure.
15175 * @param OpcodeBytesPC The PC of the opcode bytes.
15176 * @param pvOpcodeBytes Prefetched opcode bytes.
15177 * @param cbOpcodeBytes Number of prefetched bytes.
15178 * @param pcbWritten Where to return the number of bytes written.
15179 * Optional.
15180 */
15181VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
15182 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
15183 uint32_t *pcbWritten)
15184{
15185 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15186 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
15187
15188 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
15189 VBOXSTRICTRC rcStrict;
15190 if ( cbOpcodeBytes
15191 && pCtx->rip == OpcodeBytesPC)
15192 {
15193 iemInitDecoder(pVCpu, true);
15194#ifdef IEM_WITH_CODE_TLB
15195 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
15196 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
15197 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
15198 pVCpu->iem.s.offCurInstrStart = 0;
15199 pVCpu->iem.s.offInstrNextByte = 0;
15200#else
15201 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
15202 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
15203#endif
15204 rcStrict = VINF_SUCCESS;
15205 }
15206 else
15207 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
15208 if (rcStrict == VINF_SUCCESS)
15209 {
15210 rcStrict = iemExecOneInner(pVCpu, false);
15211 if (pcbWritten)
15212 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
15213 }
15214
15215#ifdef IN_RC
15216 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
15217#endif
15218 return rcStrict;
15219}
15220
15221
15222VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
15223{
15224 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
15225
15226#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
15227 /*
15228 * See if there is an interrupt pending in TRPM, inject it if we can.
15229 */
15230 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15231# ifdef IEM_VERIFICATION_MODE_FULL
15232 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15233# endif
15234 if ( pCtx->eflags.Bits.u1IF
15235 && TRPMHasTrap(pVCpu)
15236 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15237 {
15238 uint8_t u8TrapNo;
15239 TRPMEVENT enmType;
15240 RTGCUINT uErrCode;
15241 RTGCPTR uCr2;
15242 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15243 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15244 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15245 TRPMResetTrap(pVCpu);
15246 }
15247
15248 /*
15249 * Log the state.
15250 */
15251# ifdef LOG_ENABLED
15252 iemLogCurInstr(pVCpu, pCtx, true);
15253# endif
15254
15255 /*
15256 * Do the decoding and emulation.
15257 */
15258 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15259 if (rcStrict == VINF_SUCCESS)
15260 rcStrict = iemExecOneInner(pVCpu, true);
15261
15262 /*
15263 * Assert some sanity.
15264 */
15265 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15266
15267 /*
15268 * Log and return.
15269 */
15270 if (rcStrict != VINF_SUCCESS)
15271 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15272 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15273 if (pcInstructions)
15274 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15275 return rcStrict;
15276
15277#else /* Not verification mode */
15278
15279 /*
15280 * See if there is an interrupt pending in TRPM, inject it if we can.
15281 */
15282 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15283# ifdef IEM_VERIFICATION_MODE_FULL
15284 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15285# endif
15286 if ( pCtx->eflags.Bits.u1IF
15287 && TRPMHasTrap(pVCpu)
15288 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15289 {
15290 uint8_t u8TrapNo;
15291 TRPMEVENT enmType;
15292 RTGCUINT uErrCode;
15293 RTGCPTR uCr2;
15294 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15295 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15296 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15297 TRPMResetTrap(pVCpu);
15298 }
15299
15300 /*
15301 * Initial decoder init w/ prefetch, then setup setjmp.
15302 */
15303 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15304 if (rcStrict == VINF_SUCCESS)
15305 {
15306# ifdef IEM_WITH_SETJMP
15307 jmp_buf JmpBuf;
15308 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
15309 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15310 pVCpu->iem.s.cActiveMappings = 0;
15311 if ((rcStrict = setjmp(JmpBuf)) == 0)
15312# endif
15313 {
15314 /*
15315 * The run loop. We limit ourselves to 4096 instructions right now.
15316 */
15317 PVM pVM = pVCpu->CTX_SUFF(pVM);
15318 uint32_t cInstr = 4096;
15319 for (;;)
15320 {
15321 /*
15322 * Log the state.
15323 */
15324# ifdef LOG_ENABLED
15325 iemLogCurInstr(pVCpu, pCtx, true);
15326# endif
15327
15328 /*
15329 * Do the decoding and emulation.
15330 */
15331 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15332 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15333 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15334 {
15335 Assert(pVCpu->iem.s.cActiveMappings == 0);
15336 pVCpu->iem.s.cInstructions++;
15337 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
15338 {
15339 uint32_t fCpu = pVCpu->fLocalForcedActions
15340 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
15341 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
15342 | VMCPU_FF_TLB_FLUSH
15343# ifdef VBOX_WITH_RAW_MODE
15344 | VMCPU_FF_TRPM_SYNC_IDT
15345 | VMCPU_FF_SELM_SYNC_TSS
15346 | VMCPU_FF_SELM_SYNC_GDT
15347 | VMCPU_FF_SELM_SYNC_LDT
15348# endif
15349 | VMCPU_FF_INHIBIT_INTERRUPTS
15350 | VMCPU_FF_BLOCK_NMIS
15351 | VMCPU_FF_UNHALT ));
15352
15353 if (RT_LIKELY( ( !fCpu
15354 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
15355 && !pCtx->rflags.Bits.u1IF) )
15356 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
15357 {
15358 if (cInstr-- > 0)
15359 {
15360 Assert(pVCpu->iem.s.cActiveMappings == 0);
15361 iemReInitDecoder(pVCpu);
15362 continue;
15363 }
15364 }
15365 }
15366 Assert(pVCpu->iem.s.cActiveMappings == 0);
15367 }
15368 else if (pVCpu->iem.s.cActiveMappings > 0)
15369 iemMemRollback(pVCpu);
15370 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15371 break;
15372 }
15373 }
15374# ifdef IEM_WITH_SETJMP
15375 else
15376 {
15377 if (pVCpu->iem.s.cActiveMappings > 0)
15378 iemMemRollback(pVCpu);
15379 pVCpu->iem.s.cLongJumps++;
15380 }
15381 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15382# endif
15383
15384 /*
15385 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15386 */
15387 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15388 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15389# if defined(IEM_VERIFICATION_MODE_FULL)
15390 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15391 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15392 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15393 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15394# endif
15395 }
15396
15397 /*
15398 * Maybe re-enter raw-mode and log.
15399 */
15400# ifdef IN_RC
15401 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15402# endif
15403 if (rcStrict != VINF_SUCCESS)
15404 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15405 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15406 if (pcInstructions)
15407 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15408 return rcStrict;
15409#endif /* Not verification mode */
15410}
15411
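/*
 * Illustrative sketch only (not part of the VirtualBox sources): a minimal caller-side
 * fragment for IEMExecLots, assuming the caller just wants the completed instruction
 * count and the scheduling status back.  The helper name is made up.
 */
#if 0
static VBOXSTRICTRC ExampleRunBatch(PVMCPU pVCpu)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
    /* Scheduling statuses (VINF_EM_XXX) and pending force flags make IEMExecLots
       return early; the caller decides whether to loop again. */
    Log(("ExampleRunBatch: executed %u instruction(s), rc=%Rrc\n",
         cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif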
15412
15413
15414/**
15415 * Injects a trap, fault, abort, software interrupt or external interrupt.
15416 *
15417 * The parameter list matches TRPMQueryTrapAll pretty closely.
15418 *
15419 * @returns Strict VBox status code.
15420 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15421 * @param u8TrapNo The trap number.
15422 * @param enmType What type is it (trap/fault/abort), software
15423 * interrupt or hardware interrupt.
15424 * @param uErrCode The error code if applicable.
15425 * @param uCr2 The CR2 value if applicable.
15426 * @param cbInstr The instruction length (only relevant for
15427 * software interrupts).
15428 */
15429VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
15430 uint8_t cbInstr)
15431{
15432 iemInitDecoder(pVCpu, false);
15433#ifdef DBGFTRACE_ENABLED
15434 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
15435 u8TrapNo, enmType, uErrCode, uCr2);
15436#endif
15437
15438 uint32_t fFlags;
15439 switch (enmType)
15440 {
15441 case TRPM_HARDWARE_INT:
15442 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
15443 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
15444 uErrCode = uCr2 = 0;
15445 break;
15446
15447 case TRPM_SOFTWARE_INT:
15448 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
15449 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
15450 uErrCode = uCr2 = 0;
15451 break;
15452
15453 case TRPM_TRAP:
15454 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
15455 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
15456 if (u8TrapNo == X86_XCPT_PF)
15457 fFlags |= IEM_XCPT_FLAGS_CR2;
15458 switch (u8TrapNo)
15459 {
15460 case X86_XCPT_DF:
15461 case X86_XCPT_TS:
15462 case X86_XCPT_NP:
15463 case X86_XCPT_SS:
15464 case X86_XCPT_PF:
15465 case X86_XCPT_AC:
15466 fFlags |= IEM_XCPT_FLAGS_ERR;
15467 break;
15468
15469 case X86_XCPT_NMI:
15470 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
15471 break;
15472 }
15473 break;
15474
15475 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15476 }
15477
15478 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15479}
15480
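/*
 * Illustrative sketch only (not part of the VirtualBox sources): injecting a page
 * fault with an error code and fault address via IEMInjectTrap.  The helper name and
 * its parameters are made up for the example.
 */
#if 0
static VBOXSTRICTRC ExampleInjectPageFault(PVMCPU pVCpu, uint32_t uErrCode, RTGCPTR GCPtrFault)
{
    /* For TRPM_TRAP + X86_XCPT_PF the function above adds IEM_XCPT_FLAGS_ERR and
       IEM_XCPT_FLAGS_CR2 itself, so only the raw values are passed here.  The
       instruction length is irrelevant for hardware exceptions. */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, (uint16_t)uErrCode, GCPtrFault, 0 /*cbInstr*/);
}
#endif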
15481
15482/**
15483 * Injects the active TRPM event.
15484 *
15485 * @returns Strict VBox status code.
15486 * @param pVCpu The cross context virtual CPU structure.
15487 */
15488VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
15489{
15490#ifndef IEM_IMPLEMENTS_TASKSWITCH
15491 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15492#else
15493 uint8_t u8TrapNo;
15494 TRPMEVENT enmType;
15495 RTGCUINT uErrCode;
15496 RTGCUINTPTR uCr2;
15497 uint8_t cbInstr;
15498 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
15499 if (RT_FAILURE(rc))
15500 return rc;
15501
15502 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15503
15504 /** @todo Are there any other codes that imply the event was successfully
15505 * delivered to the guest? See @bugref{6607}. */
15506 if ( rcStrict == VINF_SUCCESS
15507 || rcStrict == VINF_IEM_RAISED_XCPT)
15508 {
15509 TRPMResetTrap(pVCpu);
15510 }
15511 return rcStrict;
15512#endif
15513}
15514
15515
15516VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15517{
15518 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15519 return VERR_NOT_IMPLEMENTED;
15520}
15521
15522
15523VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15524{
15525 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15526 return VERR_NOT_IMPLEMENTED;
15527}
15528
15529
15530#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15531/**
15532 * Executes an IRET instruction with the default operand size.
15533 *
15534 * This is for PATM.
15535 *
15536 * @returns VBox status code.
15537 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15538 * @param pCtxCore The register frame.
15539 */
15540VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
15541{
15542 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15543
15544 iemCtxCoreToCtx(pCtx, pCtxCore);
15545 iemInitDecoder(pVCpu);
15546 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15547 if (rcStrict == VINF_SUCCESS)
15548 iemCtxToCtxCore(pCtxCore, pCtx);
15549 else
15550 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15551 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15552 return rcStrict;
15553}
15554#endif
15555
15556
15557/**
15558 * Macro used by the IEMExec* methods to check the given instruction length.
15559 *
15560 * Will return on failure!
15561 *
15562 * @param a_cbInstr The given instruction length.
15563 * @param a_cbMin The minimum length.
15564 */
15565#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15566 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15567 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
15568
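/* The single unsigned comparison above folds both bounds into one check, accepting
   a_cbMin <= a_cbInstr <= 15 (the maximum x86 instruction length).  For example,
   with a_cbMin = 2: a_cbInstr = 1 wraps around to UINT_MAX and fails, 2..15 map to
   0..13 and pass, while 16 gives 14 > 13 and fails. */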
15569
15570/**
15571 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15572 *
15573 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15574 *
15575 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
15576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15577 * @param rcStrict The status code to fiddle.
15578 */
15579DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15580{
15581 iemUninitExec(pVCpu);
15582#ifdef IN_RC
15583 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15584 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15585#else
15586 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15587#endif
15588}
15589
15590
15591/**
15592 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15593 *
15594 * This API ASSUMES that the caller has already verified that the guest code is
15595 * allowed to access the I/O port. (The I/O port is in the DX register in the
15596 * guest state.)
15597 *
15598 * @returns Strict VBox status code.
15599 * @param pVCpu The cross context virtual CPU structure.
15600 * @param cbValue The size of the I/O port access (1, 2, or 4).
15601 * @param enmAddrMode The addressing mode.
15602 * @param fRepPrefix Indicates whether a repeat prefix is used
15603 * (doesn't matter which for this instruction).
15604 * @param cbInstr The instruction length in bytes.
15605 * @param iEffSeg The effective segment register number.
15606 * @param fIoChecked Whether the access to the I/O port has been
15607 * checked or not. It's typically checked in the
15608 * HM scenario.
15609 */
15610VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15611 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15612{
15613 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15614 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15615
15616 /*
15617 * State init.
15618 */
15619 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15620
15621 /*
15622 * Switch orgy for getting to the right handler.
15623 */
15624 VBOXSTRICTRC rcStrict;
15625 if (fRepPrefix)
15626 {
15627 switch (enmAddrMode)
15628 {
15629 case IEMMODE_16BIT:
15630 switch (cbValue)
15631 {
15632 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15633 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15634 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15635 default:
15636 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15637 }
15638 break;
15639
15640 case IEMMODE_32BIT:
15641 switch (cbValue)
15642 {
15643 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15644 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15645 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15646 default:
15647 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15648 }
15649 break;
15650
15651 case IEMMODE_64BIT:
15652 switch (cbValue)
15653 {
15654 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15655 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15656 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15657 default:
15658 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15659 }
15660 break;
15661
15662 default:
15663 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15664 }
15665 }
15666 else
15667 {
15668 switch (enmAddrMode)
15669 {
15670 case IEMMODE_16BIT:
15671 switch (cbValue)
15672 {
15673 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15674 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15675 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15676 default:
15677 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15678 }
15679 break;
15680
15681 case IEMMODE_32BIT:
15682 switch (cbValue)
15683 {
15684 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15685 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15686 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15687 default:
15688 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15689 }
15690 break;
15691
15692 case IEMMODE_64BIT:
15693 switch (cbValue)
15694 {
15695 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15696 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15697 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15698 default:
15699 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15700 }
15701 break;
15702
15703 default:
15704 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15705 }
15706 }
15707
15708 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15709}
15710
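/*
 * Illustrative sketch only (not part of the VirtualBox sources): an exit-handler style
 * use of IEMExecStringIoWrite for a "rep outsb" with 32-bit addressing and DS as the
 * effective segment.  The helper name is made up and cbInstr is assumed to come from
 * the caller's exit information.
 */
#if 0
static VBOXSTRICTRC ExampleEmulateRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    /* The port number is taken from DX in the guest context; the caller is assumed
       to have performed the I/O permission checks already (fIoChecked = true). */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS, true /*fIoChecked*/);
}
#endif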
15711
15712/**
15713 * Interface for HM and EM for executing string I/O IN (read) instructions.
15714 *
15715 * This API ASSUMES that the caller has already verified that the guest code is
15716 * allowed to access the I/O port. (The I/O port is in the DX register in the
15717 * guest state.)
15718 *
15719 * @returns Strict VBox status code.
15720 * @param pVCpu The cross context virtual CPU structure.
15721 * @param cbValue The size of the I/O port access (1, 2, or 4).
15722 * @param enmAddrMode The addressing mode.
15723 * @param fRepPrefix Indicates whether a repeat prefix is used
15724 * (doesn't matter which for this instruction).
15725 * @param cbInstr The instruction length in bytes.
15726 * @param fIoChecked Whether the access to the I/O port has been
15727 * checked or not. It's typically checked in the
15728 * HM scenario.
15729 */
15730VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15731 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15732{
15733 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15734
15735 /*
15736 * State init.
15737 */
15738 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15739
15740 /*
15741 * Switch orgy for getting to the right handler.
15742 */
15743 VBOXSTRICTRC rcStrict;
15744 if (fRepPrefix)
15745 {
15746 switch (enmAddrMode)
15747 {
15748 case IEMMODE_16BIT:
15749 switch (cbValue)
15750 {
15751 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15752 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15753 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15754 default:
15755 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15756 }
15757 break;
15758
15759 case IEMMODE_32BIT:
15760 switch (cbValue)
15761 {
15762 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15763 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15764 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15765 default:
15766 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15767 }
15768 break;
15769
15770 case IEMMODE_64BIT:
15771 switch (cbValue)
15772 {
15773 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15774 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15775 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15776 default:
15777 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15778 }
15779 break;
15780
15781 default:
15782 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15783 }
15784 }
15785 else
15786 {
15787 switch (enmAddrMode)
15788 {
15789 case IEMMODE_16BIT:
15790 switch (cbValue)
15791 {
15792 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15793 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15794 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15795 default:
15796 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15797 }
15798 break;
15799
15800 case IEMMODE_32BIT:
15801 switch (cbValue)
15802 {
15803 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15804 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15805 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15806 default:
15807 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15808 }
15809 break;
15810
15811 case IEMMODE_64BIT:
15812 switch (cbValue)
15813 {
15814 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15815 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15816 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15817 default:
15818 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15819 }
15820 break;
15821
15822 default:
15823 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15824 }
15825 }
15826
15827 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15828}
15829
15830
15831/**
15832 * Interface for raw-mode to execute an OUT instruction.
15833 *
15834 * @returns Strict VBox status code.
15835 * @param pVCpu The cross context virtual CPU structure.
15836 * @param cbInstr The instruction length in bytes.
15837 * @param u16Port The port to write to.
15838 * @param cbReg The register size.
15839 *
15840 * @remarks In ring-0 not all of the state needs to be synced in.
15841 */
15842VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15843{
15844 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15845 Assert(cbReg <= 4 && cbReg != 3);
15846
15847 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15848 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15849 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15850}
15851
15852
15853/**
15854 * Interface for raw-mode to execute an IN instruction.
15855 *
15856 * @returns Strict VBox status code.
15857 * @param pVCpu The cross context virtual CPU structure.
15858 * @param cbInstr The instruction length in bytes.
15859 * @param u16Port The port to read.
15860 * @param cbReg The register size.
15861 */
15862VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15863{
15864 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15865 Assert(cbReg <= 4 && cbReg != 3);
15866
15867 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15868 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15869 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15870}
15871
15872
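/*
 * Illustrative sketch only (not part of the VirtualBox sources): replaying an already
 * decoded "out dx, al" (a single 0xEE byte) via IEMExecDecodedOut.  The helper name is
 * made up; IEMExecDecodedIn is used the same way for the IN side.
 */
#if 0
static VBOXSTRICTRC ExampleReplayOutDxAl(PVMCPU pVCpu, uint16_t u16Port)
{
    /* The value to write is taken from the guest AL register by iemCImpl_out. */
    return IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16Port, 1 /*cbReg*/);
}
#endif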
15873/**
15874 * Interface for HM and EM to write to a CRx register.
15875 *
15876 * @returns Strict VBox status code.
15877 * @param pVCpu The cross context virtual CPU structure.
15878 * @param cbInstr The instruction length in bytes.
15879 * @param iCrReg The control register number (destination).
15880 * @param iGReg The general purpose register number (source).
15881 *
15882 * @remarks In ring-0 not all of the state needs to be synced in.
15883 */
15884VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15885{
15886 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15887 Assert(iCrReg < 16);
15888 Assert(iGReg < 16);
15889
15890 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15891 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15892 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15893}
15894
15895
15896/**
15897 * Interface for HM and EM to read from a CRx register.
15898 *
15899 * @returns Strict VBox status code.
15900 * @param pVCpu The cross context virtual CPU structure.
15901 * @param cbInstr The instruction length in bytes.
15902 * @param iGReg The general purpose register number (destination).
15903 * @param iCrReg The control register number (source).
15904 *
15905 * @remarks In ring-0 not all of the state needs to be synced in.
15906 */
15907VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15908{
15909 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15910 Assert(iCrReg < 16);
15911 Assert(iGReg < 16);
15912
15913 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15914 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15915 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15916}
15917
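/*
 * Illustrative sketch only (not part of the VirtualBox sources): replaying a
 * "mov cr3, rax" (encoded as 0F 22 D8, i.e. 3 bytes) after a CR3-write intercept.
 * The helper name is made up.
 */
#if 0
static VBOXSTRICTRC ExampleReplayMovToCr3(PVMCPU pVCpu)
{
    /* iCrReg = 3 (destination CR3), iGReg = 0 (source RAX/EAX). */
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg*/, 0 /*iGReg*/);
}
#endif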
15918
15919/**
15920 * Interface for HM and EM to clear the CR0[TS] bit.
15921 *
15922 * @returns Strict VBox status code.
15923 * @param pVCpu The cross context virtual CPU structure.
15924 * @param cbInstr The instruction length in bytes.
15925 *
15926 * @remarks In ring-0 not all of the state needs to be synced in.
15927 */
15928VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15929{
15930 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15931
15932 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15933 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15934 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15935}
15936
15937
15938/**
15939 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15940 *
15941 * @returns Strict VBox status code.
15942 * @param pVCpu The cross context virtual CPU structure.
15943 * @param cbInstr The instruction length in bytes.
15944 * @param uValue The value to load into CR0.
15945 *
15946 * @remarks In ring-0 not all of the state needs to be synced in.
15947 */
15948VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
15949{
15950 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15951
15952 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15953 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
15954 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15955}
15956
15957
15958/**
15959 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15960 *
15961 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15962 *
15963 * @returns Strict VBox status code.
15964 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15965 * @param cbInstr The instruction length in bytes.
15966 * @remarks In ring-0 not all of the state needs to be synced in.
15967 * @thread EMT(pVCpu)
15968 */
15969VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15970{
15971 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15972
15973 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15974 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15975 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15976}
15977
15978
15979/**
15980 * Checks if IEM is in the process of delivering an event (interrupt or
15981 * exception).
15982 *
15983 * @returns true if we're in the process of raising an interrupt or exception,
15984 * false otherwise.
15985 * @param pVCpu The cross context virtual CPU structure.
15986 * @param puVector Where to store the vector associated with the
15987 * currently delivered event, optional.
15988 * @param pfFlags Where to store the event delivery flags (see
15989 * IEM_XCPT_FLAGS_XXX), optional.
15990 * @param puErr Where to store the error code associated with the
15991 * event, optional.
15992 * @param puCr2 Where to store the CR2 associated with the event,
15993 * optional.
15994 * @remarks The caller should check the flags to determine if the error code and
15995 * CR2 are valid for the event.
15996 */
15997VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15998{
15999 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
16000 if (fRaisingXcpt)
16001 {
16002 if (puVector)
16003 *puVector = pVCpu->iem.s.uCurXcpt;
16004 if (pfFlags)
16005 *pfFlags = pVCpu->iem.s.fCurXcpt;
16006 if (puErr)
16007 *puErr = pVCpu->iem.s.uCurXcptErr;
16008 if (puCr2)
16009 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
16010 }
16011 return fRaisingXcpt;
16012}
16013
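/*
 * Illustrative sketch only (not part of the VirtualBox sources): checking whether IEM
 * is in the middle of delivering an event, e.g. before deciding how to record a nested
 * fault.  The helper name is made up.
 */
#if 0
static void ExampleCheckPendingXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("ExampleCheckPendingXcpt: vector %#x fFlags=%#x uErr=%#x uCr2=%#RX64\n",
             uVector, fFlags, uErr, uCr2));
}
#endif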
16014
16015#ifdef VBOX_WITH_NESTED_HWVIRT
16016/**
16017 * Interface for HM and EM to emulate the CLGI instruction.
16018 *
16019 * @returns Strict VBox status code.
16020 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16021 * @param cbInstr The instruction length in bytes.
16022 * @thread EMT(pVCpu)
16023 */
16024VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
16025{
16026 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16027
16028 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16029 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
16030 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16031}
16032
16033
16034/**
16035 * Interface for HM and EM to emulate the STGI instruction.
16036 *
16037 * @returns Strict VBox status code.
16038 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16039 * @param cbInstr The instruction length in bytes.
16040 * @thread EMT(pVCpu)
16041 */
16042VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
16043{
16044 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16045
16046 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16047 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
16048 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16049}
16050
16051
16052/**
16053 * Interface for HM and EM to emulate the VMLOAD instruction.
16054 *
16055 * @returns Strict VBox status code.
16056 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16057 * @param cbInstr The instruction length in bytes.
16058 * @thread EMT(pVCpu)
16059 */
16060VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
16061{
16062 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16063
16064 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16065 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
16066 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16067}
16068
16069
16070/**
16071 * Interface for HM and EM to emulate the VMSAVE instruction.
16072 *
16073 * @returns Strict VBox status code.
16074 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16075 * @param cbInstr The instruction length in bytes.
16076 * @thread EMT(pVCpu)
16077 */
16078VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
16079{
16080 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16081
16082 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16083 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
16084 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16085}
16086
16087
16088/**
16089 * Interface for HM and EM to emulate the INVLPGA instruction.
16090 *
16091 * @returns Strict VBox status code.
16092 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16093 * @param cbInstr The instruction length in bytes.
16094 * @thread EMT(pVCpu)
16095 */
16096VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
16097{
16098 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16099
16100 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16101 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
16102 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16103}
16104#endif /* VBOX_WITH_NESTED_HWVIRT */
16105
16106#ifdef IN_RING3
16107
16108/**
16109 * Handles the unlikely and probably fatal merge cases.
16110 *
16111 * @returns Merged status code.
16112 * @param rcStrict Current EM status code.
16113 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16114 * with @a rcStrict.
16115 * @param iMemMap The memory mapping index. For error reporting only.
16116 * @param pVCpu The cross context virtual CPU structure of the calling
16117 * thread, for error reporting only.
16118 */
16119DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16120 unsigned iMemMap, PVMCPU pVCpu)
16121{
16122 if (RT_FAILURE_NP(rcStrict))
16123 return rcStrict;
16124
16125 if (RT_FAILURE_NP(rcStrictCommit))
16126 return rcStrictCommit;
16127
16128 if (rcStrict == rcStrictCommit)
16129 return rcStrictCommit;
16130
16131 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16132 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16133 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16134 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16135 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16136 return VERR_IOM_FF_STATUS_IPE;
16137}
16138
16139
16140/**
16141 * Helper for IOMR3ProcessForceFlag.
16142 *
16143 * @returns Merged status code.
16144 * @param rcStrict Current EM status code.
16145 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16146 * with @a rcStrict.
16147 * @param iMemMap The memory mapping index. For error reporting only.
16148 * @param pVCpu The cross context virtual CPU structure of the calling
16149 * thread, for error reporting only.
16150 */
16151DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16152{
16153 /* Simple. */
16154 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16155 return rcStrictCommit;
16156
16157 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16158 return rcStrict;
16159
16160 /* EM scheduling status codes. */
16161 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16162 && rcStrict <= VINF_EM_LAST))
16163 {
16164 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16165 && rcStrictCommit <= VINF_EM_LAST))
16166 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16167 }
16168
16169 /* Unlikely */
16170 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16171}
16172
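/* In short: if either side is a plain VINF_SUCCESS (or the current status is
   VINF_EM_RAW_TO_R3), the other status is kept; if both are EM scheduling statuses,
   the numerically smaller (higher priority) one wins; everything else, including real
   failures, is sorted out by iemR3MergeStatusSlow above. */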
16173
16174/**
16175 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16176 *
16177 * @returns Merge between @a rcStrict and what the commit operation returned.
16178 * @param pVM The cross context VM structure.
16179 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16180 * @param rcStrict The status code returned by ring-0 or raw-mode.
16181 */
16182VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16183{
16184 /*
16185 * Reset the pending commit.
16186 */
16187 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16188 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16189 ("%#x %#x %#x\n",
16190 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16191 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16192
16193 /*
16194 * Commit the pending bounce buffers (usually just one).
16195 */
16196 unsigned cBufs = 0;
16197 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16198 while (iMemMap-- > 0)
16199 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16200 {
16201 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16202 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16203 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16204
16205 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16206 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16207 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16208
16209 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16210 {
16211 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16212 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16213 pbBuf,
16214 cbFirst,
16215 PGMACCESSORIGIN_IEM);
16216 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16217 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16218 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16219 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16220 }
16221
16222 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16223 {
16224 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16225 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16226 pbBuf + cbFirst,
16227 cbSecond,
16228 PGMACCESSORIGIN_IEM);
16229 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16230 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16231 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16232 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16233 }
16234 cBufs++;
16235 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16236 }
16237
16238 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16239 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16240 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16241 pVCpu->iem.s.cActiveMappings = 0;
16242 return rcStrict;
16243}
16244
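/*
 * Illustrative sketch only (not part of the VirtualBox sources): a ring-3 loop
 * fragment that commits pending IEM bounce-buffer writes when VMCPU_FF_IEM is set
 * after returning from ring-0 or raw-mode execution.  The helper name is made up.
 */
# if 0
static VBOXSTRICTRC ExampleHandleIemForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
# endif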
16245#endif /* IN_RING3 */
16246