VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@66887

Last change on this file since 66887 was 66887, checked in by vboxsync, 8 years ago

VMM/IEM: int1/icebp also sets EXT error bit on nested exceptions.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 620.5 KB
1/* $Id: IEMAll.cpp 66887 2017-05-15 09:55:25Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed, as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM, because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance, an innocent-looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
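/* Illustrative sketch (added for exposition, not part of the original file): how the
   logging levels listed above map onto the LogN macros within the IEM log group.
   The message texts and the GCPtrMem/cb locals are made up. */
#if 0
Log(("iemRaiseXcptOrInt: raising #GP(0)\n"));   /* Level 1: exceptions and other major events. */
Log4(("decode - mov Gv,Ev\n"));                 /* Level 4: decoded mnemonics w/ EIP. */
Log8(("IEM WR %RGv LB %u\n", GCPtrMem, cb));    /* Level 8: memory writes. */
#endif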
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/hm_svm.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#ifdef IEM_VERIFICATION_MODE_FULL
118# include <VBox/vmm/rem.h>
119# include <VBox/vmm/mm.h>
120#endif
121#include <VBox/vmm/vm.h>
122#include <VBox/log.h>
123#include <VBox/err.h>
124#include <VBox/param.h>
125#include <VBox/dis.h>
126#include <VBox/disopcode.h>
127#include <iprt/assert.h>
128#include <iprt/string.h>
129#include <iprt/x86.h>
130
131
132/*********************************************************************************************************************************
133* Structures and Typedefs *
134*********************************************************************************************************************************/
135/** @typedef PFNIEMOP
136 * Pointer to an opcode decoder function.
137 */
138
139/** @def FNIEMOP_DEF
140 * Define an opcode decoder function.
141 *
142 * We're using macros for this so that adding and removing parameters as well as
143 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
144 *
145 * @param a_Name The function name.
146 */
147
148/** @typedef PFNIEMOPRM
149 * Pointer to an opcode decoder function with RM byte.
150 */
151
152/** @def FNIEMOPRM_DEF
153 * Define an opcode decoder function with RM byte.
154 *
155 * We're using macros for this so that adding and removing parameters as well as
156 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
157 *
158 * @param a_Name The function name.
159 */
160
161#if defined(__GNUC__) && defined(RT_ARCH_X86)
162typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
164# define FNIEMOP_DEF(a_Name) \
165 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
170
171#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
172typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
174# define FNIEMOP_DEF(a_Name) \
175 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
176# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
177 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
178# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
179 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
180
181#elif defined(__GNUC__)
182typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
183typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
184# define FNIEMOP_DEF(a_Name) \
185 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
186# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
187 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
188# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
189 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
190
191#else
192typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
193typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
194# define FNIEMOP_DEF(a_Name) \
195 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
196# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
197 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
198# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
199 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
200
201#endif
202#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
203
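/* Illustrative sketch (added for exposition, not part of the original file): how
   FNIEMOP_DEF and FNIEMOP_CALL fit together.  The handler name iemOp_ExampleNop is
   hypothetical; the real decoder functions live in the instruction templates. */
#if 0
FNIEMOP_DEF(iemOp_ExampleNop)
{
    /* A decoder function only receives pVCpu; opcode bytes, prefixes and the
       effective operand/address sizes come from the decoder state. */
    return VINF_SUCCESS;
}

/* Dispatching an opcode byte b through the one-byte map then looks like:
       VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]); */
#endif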
204
205/**
206 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
207 */
208typedef union IEMSELDESC
209{
210 /** The legacy view. */
211 X86DESC Legacy;
212 /** The long mode view. */
213 X86DESC64 Long;
214} IEMSELDESC;
215/** Pointer to a selector descriptor table entry. */
216typedef IEMSELDESC *PIEMSELDESC;
217
218/**
219 * CPU exception classes.
220 */
221typedef enum IEMXCPTCLASS
222{
223 IEMXCPTCLASS_BENIGN,
224 IEMXCPTCLASS_CONTRIBUTORY,
225 IEMXCPTCLASS_PAGE_FAULT
226} IEMXCPTCLASS;
227
228
229/*********************************************************************************************************************************
230* Defined Constants And Macros *
231*********************************************************************************************************************************/
232/** @def IEM_WITH_SETJMP
233 * Enables alternative status code handling using setjmps.
234 *
235 * This adds a bit of expense via the setjmp() call since it saves all the
236 * non-volatile registers. However, it eliminates return code checks and allows
237 * for more optimal return value passing (return regs instead of stack buffer).
238 */
239#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
240# define IEM_WITH_SETJMP
241#endif
242
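/* Illustrative sketch (added for exposition, not part of the original file): with
   IEM_WITH_SETJMP the *Jmp raise helpers longjmp back to the instruction loop
   instead of returning a VBOXSTRICTRC, so the fast path needs no status check. */
#if 0
# ifdef IEM_WITH_SETJMP
    iemRaisePageFaultJmp(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);   /* does not return */
# else
    return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
# endif
#endif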
243/** Temporary hack to disable the double execution. Will be removed in favor
244 * of a dedicated execution mode in EM. */
245//#define IEM_VERIFICATION_MODE_NO_REM
246
247/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
248 * due to GCC lacking knowledge about the value range of a switch. */
249#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
250
251/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
252#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
253
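/* Illustrative sketch (added for exposition, not part of the original file): typical
   use of the default-case helper above in a switch over an IEMMODE value; cbValue
   is a hypothetical local. */
#if 0
switch (pVCpu->iem.s.enmEffOpSize)
{
    case IEMMODE_16BIT: cbValue = 2; break;
    case IEMMODE_32BIT: cbValue = 4; break;
    case IEMMODE_64BIT: cbValue = 8; break;
    IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
#endif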
254/**
255 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
256 * occasion.
257 */
258#ifdef LOG_ENABLED
259# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
260 do { \
261 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
262 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
263 } while (0)
264#else
265# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
266 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
267#endif
268
269/**
270 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
271 * occasion using the supplied logger statement.
272 *
273 * @param a_LoggerArgs What to log on failure.
274 */
275#ifdef LOG_ENABLED
276# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
277 do { \
278 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
279 /*LogFunc(a_LoggerArgs);*/ \
280 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
281 } while (0)
282#else
283# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
284 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
285#endif
286
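/* Illustrative sketch (added for exposition, not part of the original file): how an
   unimplemented aspect is typically reported; the condition and uSel are made up. */
#if 0
if (fSomeCornerCaseWeDoNotHandleYet)
    IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("uSel=%#x not supported yet\n", uSel));
#endif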
287/**
288 * Call an opcode decoder function.
289 *
290 * We're using macros for this so that adding and removing parameters can be
291 * done as we please. See FNIEMOP_DEF.
292 */
293#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
294
295/**
296 * Call a common opcode decoder function taking one extra argument.
297 *
298 * We're using macros for this so that adding and removing parameters can be
299 * done as we please. See FNIEMOP_DEF_1.
300 */
301#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
302
303/**
304 * Call a common opcode decoder function taking two extra arguments.
305 *
306 * We're using macros for this so that adding and removing parameters can be
307 * done as we please. See FNIEMOP_DEF_2.
308 */
309#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
310
311/**
312 * Check if we're currently executing in real or virtual 8086 mode.
313 *
314 * @returns @c true if it is, @c false if not.
315 * @param a_pVCpu The IEM state of the current CPU.
316 */
317#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
318
319/**
320 * Check if we're currently executing in virtual 8086 mode.
321 *
322 * @returns @c true if it is, @c false if not.
323 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
324 */
325#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
326
327/**
328 * Check if we're currently executing in long mode.
329 *
330 * @returns @c true if it is, @c false if not.
331 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
332 */
333#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
334
335/**
336 * Check if we're currently executing in real mode.
337 *
338 * @returns @c true if it is, @c false if not.
339 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
340 */
341#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
342
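/* Illustrative sketch (added for exposition, not part of the original file): the mode
   check macros above are typically used as early guards in instruction
   implementations, e.g. raising #UD for instructions invalid in real/v8086 mode. */
#if 0
if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
    return iemRaiseUndefinedOpcode(pVCpu);
#endif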
343/**
344 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
345 * @returns PCCPUMFEATURES
346 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
347 */
348#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
349
350/**
351 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
352 * @returns PCCPUMFEATURES
353 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
354 */
355#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
356
357/**
358 * Evaluates to true if we're presenting an Intel CPU to the guest.
359 */
360#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
361
362/**
363 * Evaluates to true if we're presenting an AMD CPU to the guest.
364 */
365#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
366
367/**
368 * Check if the address is canonical.
369 */
370#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
371
372/** @def IEM_USE_UNALIGNED_DATA_ACCESS
373 * Use unaligned accesses instead of elaborate byte assembly. */
374#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
375# define IEM_USE_UNALIGNED_DATA_ACCESS
376#endif
377
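/* Illustrative sketch (added for exposition, not part of the original file): with
   IEM_USE_UNALIGNED_DATA_ACCESS a fetch helper can read a value in one go rather
   than assembling it byte by byte; pbSrc and u32Value are hypothetical locals. */
#if 0
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    u32Value = *(uint32_t const *)pbSrc;            /* fine on x86/AMD64 hosts */
# else
    u32Value = RT_MAKE_U32_FROM_U8(pbSrc[0], pbSrc[1], pbSrc[2], pbSrc[3]);
# endif
#endif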
378#ifdef VBOX_WITH_NESTED_HWVIRT
379/**
380 * Check the common SVM instruction preconditions.
381 */
382# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
383 do { \
384 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
385 { \
386 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
387 return iemRaiseUndefinedOpcode(pVCpu); \
388 } \
389 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
390 { \
391 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
392 return iemRaiseUndefinedOpcode(pVCpu); \
393 } \
394 if (pVCpu->iem.s.uCpl != 0) \
395 { \
396 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
397 return iemRaiseGeneralProtectionFault0(pVCpu); \
398 } \
399 } while (0)
400
401/**
402 * Check if SVM is enabled.
403 */
404# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
405
406/**
407 * Check if an SVM control/instruction intercept is set.
408 */
409# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
410
411/**
412 * Check if an SVM read CRx intercept is set.
413 */
414# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
415
416/**
417 * Check if an SVM write CRx intercept is set.
418 */
419# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
420
421/**
422 * Check if an SVM read DRx intercept is set.
423 */
424# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
425
426/**
427 * Check if an SVM write DRx intercept is set.
428 */
429# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
430
431/**
432 * Check if an SVM exception intercept is set.
433 */
434# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uVector)))
435
436/**
437 * Invokes the SVM \#VMEXIT handler for the nested-guest.
438 */
439# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
440 do \
441 { \
442 VBOXSTRICTRC rcStrictVmExit = HMSvmNstGstVmExit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), \
443 (a_uExitInfo2)); \
444 return rcStrictVmExit == VINF_SVM_VMEXIT ? VINF_SUCCESS : rcStrictVmExit; \
445 } while (0)
446
447/**
448 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
449 * corresponding decode assist information.
450 */
451# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
452 do \
453 { \
454 uint64_t uExitInfo1; \
455 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssist \
456 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
457 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
458 else \
459 uExitInfo1 = 0; \
460 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
461 } while (0)
462
463/**
464 * Checks and handles an SVM MSR intercept.
465 */
466# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) \
467 HMSvmNstGstHandleMsrIntercept((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_idMsr), (a_fWrite))
468
469#else
470# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
471# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
472# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
473# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
474# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
475# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
476# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
477# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
478# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
479# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
480# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) (VERR_SVM_IPE_1)
481
482#endif /* VBOX_WITH_NESTED_HWVIRT */
483
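/* Illustrative sketch (added for exposition, not part of the original file): an SVM
   instruction implementation would typically start with the common checks and an
   intercept test before doing any real work.  The SVM_CTRL_INTERCEPT_VMRUN and
   SVM_EXIT_VMRUN constant names are assumed to come from hm_svm.h/svm.h. */
#if 0
IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);
if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
    IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
#endif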
484
485/*********************************************************************************************************************************
486* Global Variables *
487*********************************************************************************************************************************/
488extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
489
490
491/** Function table for the ADD instruction. */
492IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
493{
494 iemAImpl_add_u8, iemAImpl_add_u8_locked,
495 iemAImpl_add_u16, iemAImpl_add_u16_locked,
496 iemAImpl_add_u32, iemAImpl_add_u32_locked,
497 iemAImpl_add_u64, iemAImpl_add_u64_locked
498};
499
500/** Function table for the ADC instruction. */
501IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
502{
503 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
504 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
505 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
506 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
507};
508
509/** Function table for the SUB instruction. */
510IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
511{
512 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
513 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
514 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
515 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
516};
517
518/** Function table for the SBB instruction. */
519IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
520{
521 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
522 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
523 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
524 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
525};
526
527/** Function table for the OR instruction. */
528IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
529{
530 iemAImpl_or_u8, iemAImpl_or_u8_locked,
531 iemAImpl_or_u16, iemAImpl_or_u16_locked,
532 iemAImpl_or_u32, iemAImpl_or_u32_locked,
533 iemAImpl_or_u64, iemAImpl_or_u64_locked
534};
535
536/** Function table for the XOR instruction. */
537IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
538{
539 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
540 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
541 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
542 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
543};
544
545/** Function table for the AND instruction. */
546IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
547{
548 iemAImpl_and_u8, iemAImpl_and_u8_locked,
549 iemAImpl_and_u16, iemAImpl_and_u16_locked,
550 iemAImpl_and_u32, iemAImpl_and_u32_locked,
551 iemAImpl_and_u64, iemAImpl_and_u64_locked
552};
553
554/** Function table for the CMP instruction.
555 * @remarks Making operand order ASSUMPTIONS.
556 */
557IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
558{
559 iemAImpl_cmp_u8, NULL,
560 iemAImpl_cmp_u16, NULL,
561 iemAImpl_cmp_u32, NULL,
562 iemAImpl_cmp_u64, NULL
563};
564
565/** Function table for the TEST instruction.
566 * @remarks Making operand order ASSUMPTIONS.
567 */
568IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
569{
570 iemAImpl_test_u8, NULL,
571 iemAImpl_test_u16, NULL,
572 iemAImpl_test_u32, NULL,
573 iemAImpl_test_u64, NULL
574};
575
576/** Function table for the BT instruction. */
577IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
578{
579 NULL, NULL,
580 iemAImpl_bt_u16, NULL,
581 iemAImpl_bt_u32, NULL,
582 iemAImpl_bt_u64, NULL
583};
584
585/** Function table for the BTC instruction. */
586IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
587{
588 NULL, NULL,
589 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
590 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
591 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
592};
593
594/** Function table for the BTR instruction. */
595IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
596{
597 NULL, NULL,
598 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
599 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
600 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
601};
602
603/** Function table for the BTS instruction. */
604IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
605{
606 NULL, NULL,
607 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
608 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
609 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
610};
611
612/** Function table for the BSF instruction. */
613IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
614{
615 NULL, NULL,
616 iemAImpl_bsf_u16, NULL,
617 iemAImpl_bsf_u32, NULL,
618 iemAImpl_bsf_u64, NULL
619};
620
621/** Function table for the BSR instruction. */
622IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
623{
624 NULL, NULL,
625 iemAImpl_bsr_u16, NULL,
626 iemAImpl_bsr_u32, NULL,
627 iemAImpl_bsr_u64, NULL
628};
629
630/** Function table for the IMUL instruction. */
631IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
632{
633 NULL, NULL,
634 iemAImpl_imul_two_u16, NULL,
635 iemAImpl_imul_two_u32, NULL,
636 iemAImpl_imul_two_u64, NULL
637};
638
639/** Group 1 /r lookup table. */
640IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
641{
642 &g_iemAImpl_add,
643 &g_iemAImpl_or,
644 &g_iemAImpl_adc,
645 &g_iemAImpl_sbb,
646 &g_iemAImpl_and,
647 &g_iemAImpl_sub,
648 &g_iemAImpl_xor,
649 &g_iemAImpl_cmp
650};
651
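/* Illustrative sketch (added for exposition, not part of the original file): the
   group 1 table above lets the 0x80..0x83 opcodes pick the arithmetic
   implementation from the reg field of the ModR/M byte (bRm). */
#if 0
PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
#endif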
652/** Function table for the INC instruction. */
653IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
654{
655 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
656 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
657 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
658 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
659};
660
661/** Function table for the DEC instruction. */
662IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
663{
664 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
665 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
666 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
667 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
668};
669
670/** Function table for the NEG instruction. */
671IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
672{
673 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
674 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
675 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
676 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
677};
678
679/** Function table for the NOT instruction. */
680IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
681{
682 iemAImpl_not_u8, iemAImpl_not_u8_locked,
683 iemAImpl_not_u16, iemAImpl_not_u16_locked,
684 iemAImpl_not_u32, iemAImpl_not_u32_locked,
685 iemAImpl_not_u64, iemAImpl_not_u64_locked
686};
687
688
689/** Function table for the ROL instruction. */
690IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
691{
692 iemAImpl_rol_u8,
693 iemAImpl_rol_u16,
694 iemAImpl_rol_u32,
695 iemAImpl_rol_u64
696};
697
698/** Function table for the ROR instruction. */
699IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
700{
701 iemAImpl_ror_u8,
702 iemAImpl_ror_u16,
703 iemAImpl_ror_u32,
704 iemAImpl_ror_u64
705};
706
707/** Function table for the RCL instruction. */
708IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
709{
710 iemAImpl_rcl_u8,
711 iemAImpl_rcl_u16,
712 iemAImpl_rcl_u32,
713 iemAImpl_rcl_u64
714};
715
716/** Function table for the RCR instruction. */
717IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
718{
719 iemAImpl_rcr_u8,
720 iemAImpl_rcr_u16,
721 iemAImpl_rcr_u32,
722 iemAImpl_rcr_u64
723};
724
725/** Function table for the SHL instruction. */
726IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
727{
728 iemAImpl_shl_u8,
729 iemAImpl_shl_u16,
730 iemAImpl_shl_u32,
731 iemAImpl_shl_u64
732};
733
734/** Function table for the SHR instruction. */
735IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
736{
737 iemAImpl_shr_u8,
738 iemAImpl_shr_u16,
739 iemAImpl_shr_u32,
740 iemAImpl_shr_u64
741};
742
743/** Function table for the SAR instruction. */
744IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
745{
746 iemAImpl_sar_u8,
747 iemAImpl_sar_u16,
748 iemAImpl_sar_u32,
749 iemAImpl_sar_u64
750};
751
752
753/** Function table for the MUL instruction. */
754IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
755{
756 iemAImpl_mul_u8,
757 iemAImpl_mul_u16,
758 iemAImpl_mul_u32,
759 iemAImpl_mul_u64
760};
761
762/** Function table for the IMUL instruction working implicitly on rAX. */
763IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
764{
765 iemAImpl_imul_u8,
766 iemAImpl_imul_u16,
767 iemAImpl_imul_u32,
768 iemAImpl_imul_u64
769};
770
771/** Function table for the DIV instruction. */
772IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
773{
774 iemAImpl_div_u8,
775 iemAImpl_div_u16,
776 iemAImpl_div_u32,
777 iemAImpl_div_u64
778};
779
780/** Function table for the IDIV instruction. */
781IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
782{
783 iemAImpl_idiv_u8,
784 iemAImpl_idiv_u16,
785 iemAImpl_idiv_u32,
786 iemAImpl_idiv_u64
787};
788
789/** Function table for the SHLD instruction */
790IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
791{
792 iemAImpl_shld_u16,
793 iemAImpl_shld_u32,
794 iemAImpl_shld_u64,
795};
796
797/** Function table for the SHRD instruction */
798IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
799{
800 iemAImpl_shrd_u16,
801 iemAImpl_shrd_u32,
802 iemAImpl_shrd_u64,
803};
804
805
806/** Function table for the PUNPCKLBW instruction */
807IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
809/** Function table for the PUNPCKLWD instruction */
809IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
810/** Function table for the PUNPCKLDQ instruction */
811IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
812/** Function table for the PUNPCKLQDQ instruction */
813IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
814
815/** Function table for the PUNPCKHBW instruction */
816IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
817/** Function table for the PUNPCKHBD instruction */
818IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
819/** Function table for the PUNPCKHDQ instruction */
820IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
821/** Function table for the PUNPCKHQDQ instruction */
822IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
823
824/** Function table for the PXOR instruction */
825IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
826/** Function table for the PCMPEQB instruction */
827IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
828/** Function table for the PCMPEQW instruction */
829IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
830/** Function table for the PCMPEQD instruction */
831IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
832
833
834#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
835/** What IEM just wrote. */
836uint8_t g_abIemWrote[256];
837/** How much IEM just wrote. */
838size_t g_cbIemWrote;
839#endif
840
841
842/*********************************************************************************************************************************
843* Internal Functions *
844*********************************************************************************************************************************/
845IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
846IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
847IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
848IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
849/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
850IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
851IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
852IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
853IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
854IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
855IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
856IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
857IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
858IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
859IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
860IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
861IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
862#ifdef IEM_WITH_SETJMP
863DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
864DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
865DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
866DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
867DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
868#endif
869
870IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
871IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
872IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
873IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
874IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
875IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
876IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
877IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
878IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
879IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
880IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
881IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
882IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
883IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
884IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
885IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
886
887#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
888IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
889#endif
890IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
891IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
892
893#ifdef VBOX_WITH_NESTED_HWVIRT
894/**
895 * Checks if the intercepted IO instruction causes a \#VMEXIT and handles it
896 * accordingly.
897 *
898 * @returns VBox strict status code.
899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
900 * @param u16Port The IO port being accessed.
901 * @param enmIoType The type of IO access.
902 * @param cbReg The IO operand size in bytes.
903 * @param cAddrSizeBits The address size in bits (16, 32 or 64).
904 * @param iEffSeg The effective segment number.
905 * @param fRep Whether this is a repeating IO instruction (REP prefix).
906 * @param fStrIo Whether this is a string IO instruction.
907 * @param cbInstr The length of the IO instruction in bytes.
908 *
909 * @remarks This must be called only when IO instructions are intercepted by the
910 * nested-guest hypervisor.
911 */
912IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
913 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
914{
915 Assert(IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT));
916 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
917 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
918
919 static const uint32_t s_auIoOpSize[] = { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
920 static const uint32_t s_auIoAddrSize[] = { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
921
922 SVMIOIOEXITINFO IoExitInfo;
923 IoExitInfo.u = s_auIoOpSize[cbReg & 7];
924 IoExitInfo.u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
925 IoExitInfo.n.u1STR = fStrIo;
926 IoExitInfo.n.u1REP = fRep;
927 IoExitInfo.n.u3SEG = iEffSeg & 0x7;
928 IoExitInfo.n.u1Type = enmIoType;
929 IoExitInfo.n.u16Port = u16Port;
930
931 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
932 return HMSvmNstGstHandleIOIntercept(pVCpu, pCtx, &IoExitInfo, pCtx->rip + cbInstr);
933}
934
935#else
936IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
937 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
938{
939 RT_NOREF9(pVCpu, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo, cbInstr);
940 return VERR_IEM_IPE_9;
941}
942#endif /* VBOX_WITH_NESTED_HWVIRT */
943
944
945/**
946 * Sets the pass up status.
947 *
948 * @returns VINF_SUCCESS.
949 * @param pVCpu The cross context virtual CPU structure of the
950 * calling thread.
951 * @param rcPassUp The pass up status. Must be informational.
952 * VINF_SUCCESS is not allowed.
953 */
954IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
955{
956 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
957
958 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
959 if (rcOldPassUp == VINF_SUCCESS)
960 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
961 /* If both are EM scheduling codes, use EM priority rules. */
962 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
963 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
964 {
965 if (rcPassUp < rcOldPassUp)
966 {
967 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
968 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
969 }
970 else
971 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
972 }
973 /* Override EM scheduling with specific status code. */
974 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
975 {
976 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
977 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
978 }
979 /* Don't override specific status code, first come first served. */
980 else
981 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
982 return VINF_SUCCESS;
983}
984
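/* Illustrative sketch (added for exposition, not part of the original file): how
   informational statuses from PGM accesses get folded into the pass-up status so
   the instruction can still complete (same pattern as in the opcode prefetcher). */
#if 0
if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
#endif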
985
986/**
987 * Calculates the CPU mode.
988 *
989 * This is mainly for updating IEMCPU::enmCpuMode.
990 *
991 * @returns CPU mode.
992 * @param pCtx The register context for the CPU.
993 */
994DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
995{
996 if (CPUMIsGuestIn64BitCodeEx(pCtx))
997 return IEMMODE_64BIT;
998 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
999 return IEMMODE_32BIT;
1000 return IEMMODE_16BIT;
1001}
1002
1003
1004/**
1005 * Initializes the execution state.
1006 *
1007 * @param pVCpu The cross context virtual CPU structure of the
1008 * calling thread.
1009 * @param fBypassHandlers Whether to bypass access handlers.
1010 *
1011 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1012 * side-effects in strict builds.
1013 */
1014DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1015{
1016 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1017
1018 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1019
1020#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1021 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1022 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1023 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1024 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1025 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1026 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1027 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1028 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1029#endif
1030
1031#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1032 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1033#endif
1034 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1035 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1036#ifdef VBOX_STRICT
1037 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1038 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1039 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1040 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1041 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1042 pVCpu->iem.s.uRexReg = 127;
1043 pVCpu->iem.s.uRexB = 127;
1044 pVCpu->iem.s.uRexIndex = 127;
1045 pVCpu->iem.s.iEffSeg = 127;
1046 pVCpu->iem.s.idxPrefix = 127;
1047 pVCpu->iem.s.uVex3rdReg = 127;
1048 pVCpu->iem.s.uVexLength = 127;
1049 pVCpu->iem.s.fEvexStuff = 127;
1050 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1051# ifdef IEM_WITH_CODE_TLB
1052 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1053 pVCpu->iem.s.pbInstrBuf = NULL;
1054 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1055 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1056 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1057 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1058# else
1059 pVCpu->iem.s.offOpcode = 127;
1060 pVCpu->iem.s.cbOpcode = 127;
1061# endif
1062#endif
1063
1064 pVCpu->iem.s.cActiveMappings = 0;
1065 pVCpu->iem.s.iNextMapping = 0;
1066 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1067 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1068#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1069 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1070 && pCtx->cs.u64Base == 0
1071 && pCtx->cs.u32Limit == UINT32_MAX
1072 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1073 if (!pVCpu->iem.s.fInPatchCode)
1074 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1075#endif
1076
1077#ifdef IEM_VERIFICATION_MODE_FULL
1078 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1079 pVCpu->iem.s.fNoRem = true;
1080#endif
1081}
1082
1083
1084/**
1085 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1086 *
1087 * @param pVCpu The cross context virtual CPU structure of the
1088 * calling thread.
1089 */
1090DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1091{
1092 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1093#ifdef IEM_VERIFICATION_MODE_FULL
1094 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1095#endif
1096#ifdef VBOX_STRICT
1097# ifdef IEM_WITH_CODE_TLB
1098 NOREF(pVCpu);
1099# else
1100 pVCpu->iem.s.cbOpcode = 0;
1101# endif
1102#else
1103 NOREF(pVCpu);
1104#endif
1105}
1106
1107
1108/**
1109 * Initializes the decoder state.
1110 *
1111 * iemReInitDecoder is mostly a copy of this function.
1112 *
1113 * @param pVCpu The cross context virtual CPU structure of the
1114 * calling thread.
1115 * @param fBypassHandlers Whether to bypass access handlers.
1116 */
1117DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1118{
1119 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1120
1121 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1122
1123#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1124 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1125 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1126 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1127 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1128 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1129 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1130 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1131 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1132#endif
1133
1134#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1135 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1136#endif
1137 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1138#ifdef IEM_VERIFICATION_MODE_FULL
1139 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1140 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1141#endif
1142 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1143 pVCpu->iem.s.enmCpuMode = enmMode;
1144 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1145 pVCpu->iem.s.enmEffAddrMode = enmMode;
1146 if (enmMode != IEMMODE_64BIT)
1147 {
1148 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1149 pVCpu->iem.s.enmEffOpSize = enmMode;
1150 }
1151 else
1152 {
1153 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1154 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1155 }
1156 pVCpu->iem.s.fPrefixes = 0;
1157 pVCpu->iem.s.uRexReg = 0;
1158 pVCpu->iem.s.uRexB = 0;
1159 pVCpu->iem.s.uRexIndex = 0;
1160 pVCpu->iem.s.idxPrefix = 0;
1161 pVCpu->iem.s.uVex3rdReg = 0;
1162 pVCpu->iem.s.uVexLength = 0;
1163 pVCpu->iem.s.fEvexStuff = 0;
1164 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1165#ifdef IEM_WITH_CODE_TLB
1166 pVCpu->iem.s.pbInstrBuf = NULL;
1167 pVCpu->iem.s.offInstrNextByte = 0;
1168 pVCpu->iem.s.offCurInstrStart = 0;
1169# ifdef VBOX_STRICT
1170 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1171 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1172 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1173# endif
1174#else
1175 pVCpu->iem.s.offOpcode = 0;
1176 pVCpu->iem.s.cbOpcode = 0;
1177#endif
1178 pVCpu->iem.s.cActiveMappings = 0;
1179 pVCpu->iem.s.iNextMapping = 0;
1180 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1181 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1182#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1183 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1184 && pCtx->cs.u64Base == 0
1185 && pCtx->cs.u32Limit == UINT32_MAX
1186 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1187 if (!pVCpu->iem.s.fInPatchCode)
1188 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1189#endif
1190
1191#ifdef DBGFTRACE_ENABLED
1192 switch (enmMode)
1193 {
1194 case IEMMODE_64BIT:
1195 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1196 break;
1197 case IEMMODE_32BIT:
1198 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1199 break;
1200 case IEMMODE_16BIT:
1201 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1202 break;
1203 }
1204#endif
1205}
1206
1207
1208/**
1209 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1210 *
1211 * This is mostly a copy of iemInitDecoder.
1212 *
1213 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1214 */
1215DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1216{
1217 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1218
1219 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1220
1221#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1222 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1223 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1224 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1225 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1226 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1227 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1228 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1229 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1230#endif
1231
1232 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1233#ifdef IEM_VERIFICATION_MODE_FULL
1234 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1235 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1236#endif
1237 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1238 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1239 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1240 pVCpu->iem.s.enmEffAddrMode = enmMode;
1241 if (enmMode != IEMMODE_64BIT)
1242 {
1243 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1244 pVCpu->iem.s.enmEffOpSize = enmMode;
1245 }
1246 else
1247 {
1248 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1249 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1250 }
1251 pVCpu->iem.s.fPrefixes = 0;
1252 pVCpu->iem.s.uRexReg = 0;
1253 pVCpu->iem.s.uRexB = 0;
1254 pVCpu->iem.s.uRexIndex = 0;
1255 pVCpu->iem.s.idxPrefix = 0;
1256 pVCpu->iem.s.uVex3rdReg = 0;
1257 pVCpu->iem.s.uVexLength = 0;
1258 pVCpu->iem.s.fEvexStuff = 0;
1259 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1260#ifdef IEM_WITH_CODE_TLB
1261 if (pVCpu->iem.s.pbInstrBuf)
1262 {
1263 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1264 - pVCpu->iem.s.uInstrBufPc;
1265 if (off < pVCpu->iem.s.cbInstrBufTotal)
1266 {
1267 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1268 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1269 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1270 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1271 else
1272 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1273 }
1274 else
1275 {
1276 pVCpu->iem.s.pbInstrBuf = NULL;
1277 pVCpu->iem.s.offInstrNextByte = 0;
1278 pVCpu->iem.s.offCurInstrStart = 0;
1279 pVCpu->iem.s.cbInstrBuf = 0;
1280 pVCpu->iem.s.cbInstrBufTotal = 0;
1281 }
1282 }
1283 else
1284 {
1285 pVCpu->iem.s.offInstrNextByte = 0;
1286 pVCpu->iem.s.offCurInstrStart = 0;
1287 pVCpu->iem.s.cbInstrBuf = 0;
1288 pVCpu->iem.s.cbInstrBufTotal = 0;
1289 }
1290#else
1291 pVCpu->iem.s.cbOpcode = 0;
1292 pVCpu->iem.s.offOpcode = 0;
1293#endif
1294 Assert(pVCpu->iem.s.cActiveMappings == 0);
1295 pVCpu->iem.s.iNextMapping = 0;
1296 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1297 Assert(pVCpu->iem.s.fBypassHandlers == false);
1298#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1299 if (!pVCpu->iem.s.fInPatchCode)
1300 { /* likely */ }
1301 else
1302 {
1303 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1304 && pCtx->cs.u64Base == 0
1305 && pCtx->cs.u32Limit == UINT32_MAX
1306 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1307 if (!pVCpu->iem.s.fInPatchCode)
1308 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1309 }
1310#endif
1311
1312#ifdef DBGFTRACE_ENABLED
1313 switch (enmMode)
1314 {
1315 case IEMMODE_64BIT:
1316 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1317 break;
1318 case IEMMODE_32BIT:
1319 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1320 break;
1321 case IEMMODE_16BIT:
1322 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1323 break;
1324 }
1325#endif
1326}
1327
1328
1329
1330/**
1331 * Prefetches the opcodes when starting execution for the first time.
1332 *
1333 * @returns Strict VBox status code.
1334 * @param pVCpu The cross context virtual CPU structure of the
1335 * calling thread.
1336 * @param fBypassHandlers Whether to bypass access handlers.
1337 */
1338IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1339{
1340#ifdef IEM_VERIFICATION_MODE_FULL
1341 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1342#endif
1343 iemInitDecoder(pVCpu, fBypassHandlers);
1344
1345#ifdef IEM_WITH_CODE_TLB
1346 /** @todo Do ITLB lookup here. */
1347
1348#else /* !IEM_WITH_CODE_TLB */
1349
1350 /*
1351 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1352 *
1353 * First translate CS:rIP to a physical address.
1354 */
1355 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1356 uint32_t cbToTryRead;
1357 RTGCPTR GCPtrPC;
1358 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1359 {
1360 cbToTryRead = PAGE_SIZE;
1361 GCPtrPC = pCtx->rip;
1362 if (IEM_IS_CANONICAL(GCPtrPC))
1363 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1364 else
1365 return iemRaiseGeneralProtectionFault0(pVCpu);
1366 }
1367 else
1368 {
1369 uint32_t GCPtrPC32 = pCtx->eip;
1370 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1371 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1372 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1373 else
1374 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1375 if (cbToTryRead) { /* likely */ }
1376 else /* overflowed */
1377 {
1378 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1379 cbToTryRead = UINT32_MAX;
1380 }
1381 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1382 Assert(GCPtrPC <= UINT32_MAX);
1383 }
1384
1385# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1386 /* Allow interpretation of patch manager code blocks since they can for
1387 instance throw #PFs for perfectly good reasons. */
1388 if (pVCpu->iem.s.fInPatchCode)
1389 {
1390 size_t cbRead = 0;
1391 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1392 AssertRCReturn(rc, rc);
1393 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1394 return VINF_SUCCESS;
1395 }
1396# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1397
1398 RTGCPHYS GCPhys;
1399 uint64_t fFlags;
1400 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1401 if (RT_SUCCESS(rc)) { /* probable */ }
1402 else
1403 {
1404 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1405 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1406 }
1407 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1408 else
1409 {
1410 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1411 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1412 }
1413 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1414 else
1415 {
1416 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1417 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1418 }
1419 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1420 /** @todo Check reserved bits and such stuff. PGM is better at doing
1421 * that, so do it when implementing the guest virtual address
1422 * TLB... */
1423
1424# ifdef IEM_VERIFICATION_MODE_FULL
1425 /*
1426 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1427 * instruction.
1428 */
1429 /** @todo optimize this differently by not using PGMPhysRead. */
1430 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1431 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1432 if ( offPrevOpcodes < cbOldOpcodes
1433 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1434 {
1435 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1436 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1437 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1438 pVCpu->iem.s.cbOpcode = cbNew;
1439 return VINF_SUCCESS;
1440 }
1441# endif
1442
1443 /*
1444 * Read the bytes at this address.
1445 */
1446 PVM pVM = pVCpu->CTX_SUFF(pVM);
1447# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1448 size_t cbActual;
1449 if ( PATMIsEnabled(pVM)
1450 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1451 {
1452 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1453 Assert(cbActual > 0);
1454 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1455 }
1456 else
1457# endif
1458 {
1459 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1460 if (cbToTryRead > cbLeftOnPage)
1461 cbToTryRead = cbLeftOnPage;
1462 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1463 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1464
1465 if (!pVCpu->iem.s.fBypassHandlers)
1466 {
1467 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1468 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1469 { /* likely */ }
1470 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1471 {
1472 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1473 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1474 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1475 }
1476 else
1477 {
1478 Log((RT_SUCCESS(rcStrict)
1479 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1480 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1481 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1482 return rcStrict;
1483 }
1484 }
1485 else
1486 {
1487 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1488 if (RT_SUCCESS(rc))
1489 { /* likely */ }
1490 else
1491 {
1492 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1493 GCPtrPC, GCPhys, cbToTryRead, rc));
1494 return rc;
1495 }
1496 }
1497 pVCpu->iem.s.cbOpcode = cbToTryRead;
1498 }
1499#endif /* !IEM_WITH_CODE_TLB */
1500 return VINF_SUCCESS;
1501}
1502
1503
1504/**
1505 * Invalidates the IEM TLBs.
1506 *
1507 * This is called internally as well as by PGM when moving GC mappings.
1508 *
1510 * @param pVCpu The cross context virtual CPU structure of the calling
1511 * thread.
1512 * @param fVmm Set when PGM calls us with a remapping.
1513 */
1514VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1515{
1516#ifdef IEM_WITH_CODE_TLB
1517 pVCpu->iem.s.cbInstrBufTotal = 0;
1518 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1519 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1520 { /* very likely */ }
1521 else
1522 {
1523 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1524 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1525 while (i-- > 0)
1526 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1527 }
1528#endif
1529
1530#ifdef IEM_WITH_DATA_TLB
1531 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1532 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1533 { /* very likely */ }
1534 else
1535 {
1536 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1537 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1538 while (i-- > 0)
1539 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1540 }
1541#endif
1542 NOREF(pVCpu); NOREF(fVmm);
1543}
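
/*
 * Illustrative sketch (not from the original source): the revision trick above is
 * what makes a full flush cheap.  A lookup only counts as a hit when the stored
 * tag carries the *current* revision, so bumping uTlbRevision implicitly stales
 * every entry without touching the array; the array is only scrubbed on the rare
 * revision wrap-around handled above.  The fragment mirrors the lookup logic used
 * by the opcode fetcher further down in this file.
 *
 * @code
 *  uint64_t const uTag  = (GCPtr >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
 *  PIEMTLBENTRY   pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
 *  bool const     fHit  = pTlbe->uTag == uTag; // an entry tagged with an older revision can never match
 * @endcode
 */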
1544
1545
1546/**
1547 * Invalidates a page in the TLBs.
1548 *
1549 * @param pVCpu The cross context virtual CPU structure of the calling
1550 * thread.
1551 * @param GCPtr The address of the page to invalidate.
1552 */
1553VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1554{
1555#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1556 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1557 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1558 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1559 uintptr_t idx = (uint8_t)GCPtr;
1560
1561# ifdef IEM_WITH_CODE_TLB
1562 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1563 {
1564 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1565 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1566 pVCpu->iem.s.cbInstrBufTotal = 0;
1567 }
1568# endif
1569
1570# ifdef IEM_WITH_DATA_TLB
1571 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1572 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1573# endif
1574#else
1575 NOREF(pVCpu); NOREF(GCPtr);
1576#endif
1577}
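
/*
 * Worked example (illustrative only) of the tag/index scheme used above: after
 * the shift, GCPtr holds the page number, the slot index is its low byte, and an
 * entry is only cleared when its tag matches page-number | current revision, so
 * an unrelated page that merely aliases the same slot is left alone.
 *
 * @code
 *  RTGCPTR GCPtr = UINT64_C(0x00007f12345670f8);
 *  GCPtr >>= X86_PAGE_SHIFT;            // 0x00000007f1234567 - the page number
 *  uintptr_t const idx = (uint8_t)GCPtr; // 0x67 - slot in the 256 entry table
 * @endcode
 */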
1578
1579
1580/**
1581 * Invalidates the host physical aspects of the IEM TLBs.
1582 *
1583 * This is called internally as well as by PGM when moving GC mappings.
1584 *
1585 * @param pVCpu The cross context virtual CPU structure of the calling
1586 * thread.
1587 */
1588VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1589{
1590#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1591 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1592
1593# ifdef IEM_WITH_CODE_TLB
1594 pVCpu->iem.s.cbInstrBufTotal = 0;
1595# endif
1596 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1597 if (uTlbPhysRev != 0)
1598 {
1599 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1600 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1601 }
1602 else
1603 {
1604 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1605 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1606
1607 unsigned i;
1608# ifdef IEM_WITH_CODE_TLB
1609 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1610 while (i-- > 0)
1611 {
1612 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1613 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1614 }
1615# endif
1616# ifdef IEM_WITH_DATA_TLB
1617 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1618 while (i-- > 0)
1619 {
1620 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1621 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1622 }
1623# endif
1624 }
1625#else
1626 NOREF(pVCpu);
1627#endif
1628}
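
/*
 * Side note (illustrative, not from the original source): the physical revision
 * lives in the same 64-bit field as the per-entry flags, which is why bumping
 * uTlbPhysRev above is enough to force a fresh PGMPhysIemGCPhys2PtrNoLock query.
 * Consumers check the revision bits roughly like this, mirroring the opcode
 * fetcher below:
 *
 * @code
 *  bool const fPhysInfoCurrent = (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV)
 *                             == pVCpu->iem.s.CodeTlb.uTlbPhysRev;
 *  // when false: re-query PGM and fold the new uTlbPhysRev back into fFlagsAndPhysRev
 * @endcode
 */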
1629
1630
1631/**
1632 * Invalidates the host physical aspects of the IEM TLBs for all VCPUs.
1633 *
1634 * This is called internally as well as by PGM when moving GC mappings.
1635 *
1636 * @param pVM The cross context VM structure.
1637 *
1638 * @remarks Caller holds the PGM lock.
1639 */
1640VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1641{
1642 RT_NOREF_PV(pVM);
1643}
1644
1645#ifdef IEM_WITH_CODE_TLB
1646
1647/**
1648 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1649 * failure and longjmps.
1650 *
1651 * We end up here for a number of reasons:
1652 * - pbInstrBuf isn't yet initialized.
1653 * - Advancing beyond the buffer boundary (e.g. cross page).
1654 * - Advancing beyond the CS segment limit.
1655 * - Fetching from a non-mappable page (e.g. MMIO).
1656 *
1657 * @param pVCpu The cross context virtual CPU structure of the
1658 * calling thread.
1659 * @param pvDst Where to return the bytes.
1660 * @param cbDst Number of bytes to read.
1661 *
1662 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1663 */
1664IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1665{
1666#ifdef IN_RING3
1667//__debugbreak();
1668 for (;;)
1669 {
1670 Assert(cbDst <= 8);
1671 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1672
1673 /*
1674 * We might have a partial buffer match, deal with that first to make the
1675 * rest simpler. This is the first part of the cross page/buffer case.
1676 */
1677 if (pVCpu->iem.s.pbInstrBuf != NULL)
1678 {
1679 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1680 {
1681 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1682 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1683 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1684
1685 cbDst -= cbCopy;
1686 pvDst = (uint8_t *)pvDst + cbCopy;
1687 offBuf += cbCopy;
1688 pVCpu->iem.s.offInstrNextByte = offBuf; /* offBuf has already been advanced by cbCopy */
1689 }
1690 }
1691
1692 /*
1693 * Check segment limit, figuring how much we're allowed to access at this point.
1694 *
1695 * We will fault immediately if RIP is past the segment limit / in non-canonical
1696 * territory. If we do continue, there are one or more bytes to read before we
1697 * end up in trouble and we need to do that first before faulting.
1698 */
1699 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1700 RTGCPTR GCPtrFirst;
1701 uint32_t cbMaxRead;
1702 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1703 {
1704 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1705 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1706 { /* likely */ }
1707 else
1708 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1709 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1710 }
1711 else
1712 {
1713 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1714 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1715 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1716 { /* likely */ }
1717 else
1718 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1719 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1720 if (cbMaxRead != 0)
1721 { /* likely */ }
1722 else
1723 {
1724 /* Overflowed because address is 0 and limit is max. */
1725 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1726 cbMaxRead = X86_PAGE_SIZE;
1727 }
1728 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1729 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1730 if (cbMaxRead2 < cbMaxRead)
1731 cbMaxRead = cbMaxRead2;
1732 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1733 }
1734
1735 /*
1736 * Get the TLB entry for this piece of code.
1737 */
1738 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1739 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1740 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1741 if (pTlbe->uTag == uTag)
1742 {
1743 /* likely when executing lots of code, otherwise unlikely */
1744# ifdef VBOX_WITH_STATISTICS
1745 pVCpu->iem.s.CodeTlb.cTlbHits++;
1746# endif
1747 }
1748 else
1749 {
1750 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1751# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1752 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1753 {
1754 pTlbe->uTag = uTag;
1755 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1756 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1757 pTlbe->GCPhys = NIL_RTGCPHYS;
1758 pTlbe->pbMappingR3 = NULL;
1759 }
1760 else
1761# endif
1762 {
1763 RTGCPHYS GCPhys;
1764 uint64_t fFlags;
1765 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1766 if (RT_FAILURE(rc))
1767 {
1768 Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1769 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1770 }
1771
1772 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1773 pTlbe->uTag = uTag;
1774 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1775 pTlbe->GCPhys = GCPhys;
1776 pTlbe->pbMappingR3 = NULL;
1777 }
1778 }
1779
1780 /*
1781 * Check TLB page table level access flags.
1782 */
1783 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1784 {
1785 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1786 {
1787 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1788 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1789 }
1790 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1791 {
1792 Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1793 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1794 }
1795 }
1796
1797# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1798 /*
1799 * Allow interpretation of patch manager code blocks since they can for
1800 * instance throw #PFs for perfectly good reasons.
1801 */
1802 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1803 { /* likely */ }
1804 else
1805 {
1806 /** @todo Could optimize this a little in ring-3 if we liked. */
1807 size_t cbRead = 0;
1808 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1809 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1810 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1811 return;
1812 }
1813# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1814
1815 /*
1816 * Look up the physical page info if necessary.
1817 */
1818 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1819 { /* not necessary */ }
1820 else
1821 {
1822 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1823 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1824 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1825 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1826 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1827 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1828 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1829 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1830 }
1831
1832# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1833 /*
1834 * Try to do a direct read using the pbMappingR3 pointer.
1835 */
1836 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1837 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1838 {
1839 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1840 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1841 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1842 {
1843 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1844 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1845 }
1846 else
1847 {
1848 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1849 Assert(cbInstr < cbMaxRead);
1850 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1851 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1852 }
1853 if (cbDst <= cbMaxRead)
1854 {
1855 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1856 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1857 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1858 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1859 return;
1860 }
1861 pVCpu->iem.s.pbInstrBuf = NULL;
1862
1863 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1864 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1865 }
1866 else
1867# endif
1868#if 0
1869 /*
1870 * If there is no special read handling, we can read a bit more and
1871 * put it in the prefetch buffer.
1872 */
1873 if ( cbDst < cbMaxRead
1874 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1875 {
1876 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1877 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1878 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1879 { /* likely */ }
1880 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1881 {
1882 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1883 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1884 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1885 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1886 }
1887 else
1888 {
1889 Log((RT_SUCCESS(rcStrict)
1890 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1891 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1892 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1893 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1894 }
1895 }
1896 /*
1897 * Special read handling, so only read exactly what's needed.
1898 * This is a highly unlikely scenario.
1899 */
1900 else
1901#endif
1902 {
1903 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1904 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1905 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1906 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1907 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1908 { /* likely */ }
1909 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1910 {
1911 Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1912 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1913 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1914 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1915 }
1916 else
1917 {
1918 Log((RT_SUCCESS(rcStrict)
1919 ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1920 : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1921 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1922 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1923 }
1924 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1925 if (cbToRead == cbDst)
1926 return;
1927 }
1928
1929 /*
1930 * More to read, loop.
1931 */
1932 cbDst -= cbMaxRead;
1933 pvDst = (uint8_t *)pvDst + cbMaxRead;
1934 }
1935#else
1936 RT_NOREF(pvDst, cbDst);
1937 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1938#endif
1939}
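
/*
 * Worked example (illustrative only) of the cross-page case handled by the loop
 * above: a 4 byte fetch starting 2 bytes before a page boundary is split over
 * two iterations, the first limited by cbMaxRead to the bytes left on the page,
 * the second starting on the following page.
 *
 * @code
 *  // GCPtrFirst = 0x...0ffe, cbDst = 4
 *  uint32_t cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK); // 2
 *  // 1st iteration copies 2 bytes, then: cbDst -= 2; pvDst = (uint8_t *)pvDst + 2;
 *  // 2nd iteration starts at 0x...1000 with cbMaxRead = X86_PAGE_SIZE and copies the rest.
 * @endcode
 */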
1940
1941#else
1942
1943/**
1944 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1945 * exception if it fails.
1946 *
1947 * @returns Strict VBox status code.
1948 * @param pVCpu The cross context virtual CPU structure of the
1949 * calling thread.
1950 * @param cbMin The minimum number of bytes relative to offOpcode
1951 * that must be read.
1952 */
1953IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1954{
1955 /*
1956 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1957 *
1958 * First translate CS:rIP to a physical address.
1959 */
1960 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1961 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1962 uint32_t cbToTryRead;
1963 RTGCPTR GCPtrNext;
1964 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1965 {
1966 cbToTryRead = PAGE_SIZE;
1967 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1968 if (!IEM_IS_CANONICAL(GCPtrNext))
1969 return iemRaiseGeneralProtectionFault0(pVCpu);
1970 }
1971 else
1972 {
1973 uint32_t GCPtrNext32 = pCtx->eip;
1974 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1975 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1976 if (GCPtrNext32 > pCtx->cs.u32Limit)
1977 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1978 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1979 if (!cbToTryRead) /* overflowed */
1980 {
1981 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1982 cbToTryRead = UINT32_MAX;
1983 /** @todo check out wrapping around the code segment. */
1984 }
1985 if (cbToTryRead < cbMin - cbLeft)
1986 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1987 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1988 }
1989
1990 /* Only read up to the end of the page, and make sure we don't read more
1991 than the opcode buffer can hold. */
1992 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1993 if (cbToTryRead > cbLeftOnPage)
1994 cbToTryRead = cbLeftOnPage;
1995 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1996 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1997/** @todo r=bird: Convert assertion into undefined opcode exception? */
1998 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1999
2000# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2001 /* Allow interpretation of patch manager code blocks since they can for
2002 instance throw #PFs for perfectly good reasons. */
2003 if (pVCpu->iem.s.fInPatchCode)
2004 {
2005 size_t cbRead = 0;
2006 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2007 AssertRCReturn(rc, rc);
2008 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2009 return VINF_SUCCESS;
2010 }
2011# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2012
2013 RTGCPHYS GCPhys;
2014 uint64_t fFlags;
2015 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2016 if (RT_FAILURE(rc))
2017 {
2018 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2019 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2020 }
2021 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2022 {
2023 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2024 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2025 }
2026 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2027 {
2028 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2029 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2030 }
2031 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2032 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2033 /** @todo Check reserved bits and such stuff. PGM is better at doing
2034 * that, so do it when implementing the guest virtual address
2035 * TLB... */
2036
2037 /*
2038 * Read the bytes at this address.
2039 *
2040 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2041 * and since PATM should only patch the start of an instruction there
2042 * should be no need to check again here.
2043 */
2044 if (!pVCpu->iem.s.fBypassHandlers)
2045 {
2046 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2047 cbToTryRead, PGMACCESSORIGIN_IEM);
2048 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2049 { /* likely */ }
2050 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2051 {
2052 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2053 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2054 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2055 }
2056 else
2057 {
2058 Log((RT_SUCCESS(rcStrict)
2059 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2060 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2061 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2062 return rcStrict;
2063 }
2064 }
2065 else
2066 {
2067 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2068 if (RT_SUCCESS(rc))
2069 { /* likely */ }
2070 else
2071 {
2072 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2073 return rc;
2074 }
2075 }
2076 pVCpu->iem.s.cbOpcode += cbToTryRead;
2077 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2078
2079 return VINF_SUCCESS;
2080}
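
/*
 * Worked example (illustrative only) of the clamping above: with a flat 32-bit
 * code segment (cs.u32Limit == UINT32_MAX) and GCPtrNext32 == 0, the expression
 * u32Limit - GCPtrNext32 + 1 wraps to 0, which the code treats as "no limit"
 * (UINT32_MAX); the read size is then bounded by what is left on the current
 * page and by the free space in abOpcode.
 *
 * @code
 *  uint32_t cbToTryRead  = UINT32_MAX;                                  // after the wrap fix-up
 *  uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);  // e.g. 0xc00
 *  cbToTryRead = RT_MIN(cbToTryRead, cbLeftOnPage);                     // 0xc00
 *  cbToTryRead = RT_MIN(cbToTryRead, (uint32_t)sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode);
 * @endcode
 */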
2081
2082#endif /* !IEM_WITH_CODE_TLB */
2083#ifndef IEM_WITH_SETJMP
2084
2085/**
2086 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2087 *
2088 * @returns Strict VBox status code.
2089 * @param pVCpu The cross context virtual CPU structure of the
2090 * calling thread.
2091 * @param pb Where to return the opcode byte.
2092 */
2093DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2094{
2095 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2096 if (rcStrict == VINF_SUCCESS)
2097 {
2098 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2099 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2100 pVCpu->iem.s.offOpcode = offOpcode + 1;
2101 }
2102 else
2103 *pb = 0;
2104 return rcStrict;
2105}
2106
2107
2108/**
2109 * Fetches the next opcode byte.
2110 *
2111 * @returns Strict VBox status code.
2112 * @param pVCpu The cross context virtual CPU structure of the
2113 * calling thread.
2114 * @param pu8 Where to return the opcode byte.
2115 */
2116DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2117{
2118 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2119 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2120 {
2121 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2122 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2123 return VINF_SUCCESS;
2124 }
2125 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2126}
2127
2128#else /* IEM_WITH_SETJMP */
2129
2130/**
2131 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2132 *
2133 * @returns The opcode byte.
2134 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2135 */
2136DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2137{
2138# ifdef IEM_WITH_CODE_TLB
2139 uint8_t u8;
2140 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2141 return u8;
2142# else
2143 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2144 if (rcStrict == VINF_SUCCESS)
2145 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2146 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2147# endif
2148}
2149
2150
2151/**
2152 * Fetches the next opcode byte, longjmp on error.
2153 *
2154 * @returns The opcode byte.
2155 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2156 */
2157DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2158{
2159# ifdef IEM_WITH_CODE_TLB
2160 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2161 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2162 if (RT_LIKELY( pbBuf != NULL
2163 && offBuf < pVCpu->iem.s.cbInstrBuf))
2164 {
2165 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2166 return pbBuf[offBuf];
2167 }
2168# else
2169 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2170 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2171 {
2172 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2173 return pVCpu->iem.s.abOpcode[offOpcode];
2174 }
2175# endif
2176 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2177}
2178
2179#endif /* IEM_WITH_SETJMP */
2180
2181/**
2182 * Fetches the next opcode byte, returns automatically on failure.
2183 *
2184 * @param a_pu8 Where to return the opcode byte.
2185 * @remark Implicitly references pVCpu.
2186 */
2187#ifndef IEM_WITH_SETJMP
2188# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2189 do \
2190 { \
2191 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2192 if (rcStrict2 == VINF_SUCCESS) \
2193 { /* likely */ } \
2194 else \
2195 return rcStrict2; \
2196 } while (0)
2197#else
2198# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2199#endif /* IEM_WITH_SETJMP */
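
/*
 * Usage sketch (illustrative only; the helper name below is made up): decoders
 * fetch prefix/ModR/M bytes through the macro so the same source works in both
 * builds - the status-code build returns from the caller on failure, the setjmp
 * build longjmps out of the decoder.
 *
 * @code
 *  static VBOXSTRICTRC iemExampleDecodeModRm(PVMCPU pVCpu) // hypothetical helper, not part of IEM
 *  {
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_U8(&bRm);   // fetches the ModR/M byte; bails out on fetch failure
 *      // ... dispatch on (bRm >> 6), ((bRm >> 3) & 7) and (bRm & 7) ...
 *      return VINF_SUCCESS;
 *  }
 * @endcode
 */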
2200
2201
2202#ifndef IEM_WITH_SETJMP
2203/**
2204 * Fetches the next signed byte from the opcode stream.
2205 *
2206 * @returns Strict VBox status code.
2207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2208 * @param pi8 Where to return the signed byte.
2209 */
2210DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2211{
2212 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2213}
2214#endif /* !IEM_WITH_SETJMP */
2215
2216
2217/**
2218 * Fetches the next signed byte from the opcode stream, returning automatically
2219 * on failure.
2220 *
2221 * @param a_pi8 Where to return the signed byte.
2222 * @remark Implicitly references pVCpu.
2223 */
2224#ifndef IEM_WITH_SETJMP
2225# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2226 do \
2227 { \
2228 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2229 if (rcStrict2 != VINF_SUCCESS) \
2230 return rcStrict2; \
2231 } while (0)
2232#else /* IEM_WITH_SETJMP */
2233# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2234
2235#endif /* IEM_WITH_SETJMP */
2236
2237#ifndef IEM_WITH_SETJMP
2238
2239/**
2240 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2241 *
2242 * @returns Strict VBox status code.
2243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2244 * @param pu16 Where to return the opcode word.
2245 */
2246DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2247{
2248 uint8_t u8;
2249 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2250 if (rcStrict == VINF_SUCCESS)
2251 *pu16 = (int8_t)u8;
2252 return rcStrict;
2253}
2254
2255
2256/**
2257 * Fetches the next signed byte from the opcode stream, sign extending it to
2258 * an unsigned 16-bit value.
2259 *
2260 * @returns Strict VBox status code.
2261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2262 * @param pu16 Where to return the unsigned word.
2263 */
2264DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2265{
2266 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2267 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2268 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2269
2270 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2271 pVCpu->iem.s.offOpcode = offOpcode + 1;
2272 return VINF_SUCCESS;
2273}
2274
2275#endif /* !IEM_WITH_SETJMP */
2276
2277/**
2278 * Fetches the next signed byte from the opcode stream, sign-extending it to
2279 * a word, and returns automatically on failure.
2280 *
2281 * @param a_pu16 Where to return the word.
2282 * @remark Implicitly references pVCpu.
2283 */
2284#ifndef IEM_WITH_SETJMP
2285# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2286 do \
2287 { \
2288 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2289 if (rcStrict2 != VINF_SUCCESS) \
2290 return rcStrict2; \
2291 } while (0)
2292#else
2293# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2294#endif
2295
2296#ifndef IEM_WITH_SETJMP
2297
2298/**
2299 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2300 *
2301 * @returns Strict VBox status code.
2302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2303 * @param pu32 Where to return the opcode dword.
2304 */
2305DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2306{
2307 uint8_t u8;
2308 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2309 if (rcStrict == VINF_SUCCESS)
2310 *pu32 = (int8_t)u8;
2311 return rcStrict;
2312}
2313
2314
2315/**
2316 * Fetches the next signed byte from the opcode stream, sign extending it to
2317 * an unsigned 32-bit value.
2318 *
2319 * @returns Strict VBox status code.
2320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2321 * @param pu32 Where to return the unsigned dword.
2322 */
2323DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2324{
2325 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2326 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2327 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2328
2329 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2330 pVCpu->iem.s.offOpcode = offOpcode + 1;
2331 return VINF_SUCCESS;
2332}
2333
2334#endif /* !IEM_WITH_SETJMP */
2335
2336/**
2337 * Fetches the next signed byte from the opcode stream, sign-extending it to
2338 * a double word, and returns automatically on failure.
2339 *
2340 * @param a_pu32 Where to return the double word.
2341 * @remark Implicitly references pVCpu.
2342 */
2343#ifndef IEM_WITH_SETJMP
2344# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2345 do \
2346 { \
2347 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2348 if (rcStrict2 != VINF_SUCCESS) \
2349 return rcStrict2; \
2350 } while (0)
2351#else
2352# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2353#endif
2354
2355#ifndef IEM_WITH_SETJMP
2356
2357/**
2358 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2359 *
2360 * @returns Strict VBox status code.
2361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2362 * @param pu64 Where to return the opcode qword.
2363 */
2364DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2365{
2366 uint8_t u8;
2367 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2368 if (rcStrict == VINF_SUCCESS)
2369 *pu64 = (int8_t)u8;
2370 return rcStrict;
2371}
2372
2373
2374/**
2375 * Fetches the next signed byte from the opcode stream, sign extending it to
2376 * an unsigned 64-bit value.
2377 *
2378 * @returns Strict VBox status code.
2379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2380 * @param pu64 Where to return the unsigned qword.
2381 */
2382DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2383{
2384 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2385 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2386 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2387
2388 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2389 pVCpu->iem.s.offOpcode = offOpcode + 1;
2390 return VINF_SUCCESS;
2391}
2392
2393#endif /* !IEM_WITH_SETJMP */
2394
2395
2396/**
2397 * Fetches the next signed byte from the opcode stream, sign-extending it to
2398 * a quad word, and returns automatically on failure.
2399 *
2400 * @param a_pu64 Where to return the quad word.
2401 * @remark Implicitly references pVCpu.
2402 */
2403#ifndef IEM_WITH_SETJMP
2404# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2405 do \
2406 { \
2407 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2408 if (rcStrict2 != VINF_SUCCESS) \
2409 return rcStrict2; \
2410 } while (0)
2411#else
2412# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2413#endif
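
/*
 * Worked example (illustrative only) of the S8 sign-extension family: a disp8
 * byte of 0x80 is -128, so widening it through int8_t fills the upper bits with
 * ones, which is what effective-address arithmetic in wider modes expects.
 *
 * @code
 *  uint8_t  const b   = 0x80;
 *  uint64_t const u64 = (uint64_t)(int64_t)(int8_t)b;  // 0xFFFFFFFFFFFFFF80
 *  uint32_t const u32 = (uint32_t)(int32_t)(int8_t)b;  // 0xFFFFFF80
 * @endcode
 */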
2414
2415
2416#ifndef IEM_WITH_SETJMP
2417
2418/**
2419 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2420 *
2421 * @returns Strict VBox status code.
2422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2423 * @param pu16 Where to return the opcode word.
2424 */
2425DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2426{
2427 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2428 if (rcStrict == VINF_SUCCESS)
2429 {
2430 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2431# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2432 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2433# else
2434 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2435# endif
2436 pVCpu->iem.s.offOpcode = offOpcode + 2;
2437 }
2438 else
2439 *pu16 = 0;
2440 return rcStrict;
2441}
2442
2443
2444/**
2445 * Fetches the next opcode word.
2446 *
2447 * @returns Strict VBox status code.
2448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2449 * @param pu16 Where to return the opcode word.
2450 */
2451DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2452{
2453 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2454 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2455 {
2456 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2457# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2458 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2459# else
2460 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2461# endif
2462 return VINF_SUCCESS;
2463 }
2464 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2465}
2466
2467#else /* IEM_WITH_SETJMP */
2468
2469/**
2470 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2471 *
2472 * @returns The opcode word.
2473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2474 */
2475DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2476{
2477# ifdef IEM_WITH_CODE_TLB
2478 uint16_t u16;
2479 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2480 return u16;
2481# else
2482 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2483 if (rcStrict == VINF_SUCCESS)
2484 {
2485 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2486 pVCpu->iem.s.offOpcode += 2;
2487# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2488 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2489# else
2490 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2491# endif
2492 }
2493 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2494# endif
2495}
2496
2497
2498/**
2499 * Fetches the next opcode word, longjmp on error.
2500 *
2501 * @returns The opcode word.
2502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2503 */
2504DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2505{
2506# ifdef IEM_WITH_CODE_TLB
2507 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2508 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2509 if (RT_LIKELY( pbBuf != NULL
2510 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2511 {
2512 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2513# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2514 return *(uint16_t const *)&pbBuf[offBuf];
2515# else
2516 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2517# endif
2518 }
2519# else
2520 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2521 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2522 {
2523 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2524# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2525 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2526# else
2527 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2528# endif
2529 }
2530# endif
2531 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2532}
2533
2534#endif /* IEM_WITH_SETJMP */
2535
2536
2537/**
2538 * Fetches the next opcode word, returns automatically on failure.
2539 *
2540 * @param a_pu16 Where to return the opcode word.
2541 * @remark Implicitly references pVCpu.
2542 */
2543#ifndef IEM_WITH_SETJMP
2544# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2545 do \
2546 { \
2547 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2548 if (rcStrict2 != VINF_SUCCESS) \
2549 return rcStrict2; \
2550 } while (0)
2551#else
2552# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2553#endif
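
/*
 * Side note (illustrative only): the IEM_USE_UNALIGNED_DATA_ACCESS paths above
 * rely on the host doing cheap unaligned little-endian loads, while the
 * RT_MAKE_U16 fallback assembles the same value byte by byte with the first
 * opcode byte as the low half, so both paths agree for x86 immediates and
 * displacements.
 *
 * @code
 *  // opcode bytes ... 0x34 0x12 ... encode the 16-bit value 0x1234:
 *  uint16_t const u16 = RT_MAKE_U16(0x34, 0x12);  // low byte first -> 0x1234
 * @endcode
 */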
2554
2555#ifndef IEM_WITH_SETJMP
2556
2557/**
2558 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2559 *
2560 * @returns Strict VBox status code.
2561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2562 * @param pu32 Where to return the opcode double word.
2563 */
2564DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2565{
2566 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2567 if (rcStrict == VINF_SUCCESS)
2568 {
2569 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2570 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2571 pVCpu->iem.s.offOpcode = offOpcode + 2;
2572 }
2573 else
2574 *pu32 = 0;
2575 return rcStrict;
2576}
2577
2578
2579/**
2580 * Fetches the next opcode word, zero extending it to a double word.
2581 *
2582 * @returns Strict VBox status code.
2583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2584 * @param pu32 Where to return the opcode double word.
2585 */
2586DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2587{
2588 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2589 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2590 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2591
2592 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2593 pVCpu->iem.s.offOpcode = offOpcode + 2;
2594 return VINF_SUCCESS;
2595}
2596
2597#endif /* !IEM_WITH_SETJMP */
2598
2599
2600/**
2601 * Fetches the next opcode word and zero extends it to a double word, returns
2602 * automatically on failure.
2603 *
2604 * @param a_pu32 Where to return the opcode double word.
2605 * @remark Implicitly references pVCpu.
2606 */
2607#ifndef IEM_WITH_SETJMP
2608# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2609 do \
2610 { \
2611 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2612 if (rcStrict2 != VINF_SUCCESS) \
2613 return rcStrict2; \
2614 } while (0)
2615#else
2616# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2617#endif
2618
2619#ifndef IEM_WITH_SETJMP
2620
2621/**
2622 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2623 *
2624 * @returns Strict VBox status code.
2625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2626 * @param pu64 Where to return the opcode quad word.
2627 */
2628DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2629{
2630 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2631 if (rcStrict == VINF_SUCCESS)
2632 {
2633 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2634 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2635 pVCpu->iem.s.offOpcode = offOpcode + 2;
2636 }
2637 else
2638 *pu64 = 0;
2639 return rcStrict;
2640}
2641
2642
2643/**
2644 * Fetches the next opcode word, zero extending it to a quad word.
2645 *
2646 * @returns Strict VBox status code.
2647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2648 * @param pu64 Where to return the opcode quad word.
2649 */
2650DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2651{
2652 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2653 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2654 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2655
2656 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2657 pVCpu->iem.s.offOpcode = offOpcode + 2;
2658 return VINF_SUCCESS;
2659}
2660
2661#endif /* !IEM_WITH_SETJMP */
2662
2663/**
2664 * Fetches the next opcode word and zero extends it to a quad word, returns
2665 * automatically on failure.
2666 *
2667 * @param a_pu64 Where to return the opcode quad word.
2668 * @remark Implicitly references pVCpu.
2669 */
2670#ifndef IEM_WITH_SETJMP
2671# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2672 do \
2673 { \
2674 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2675 if (rcStrict2 != VINF_SUCCESS) \
2676 return rcStrict2; \
2677 } while (0)
2678#else
2679# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2680#endif
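
/*
 * Worked example (illustrative only) of the U16 zero-extension variants: the
 * fetched word is placed in a wider variable with the upper bits clear, letting
 * 16-bit and wider operand sizes share one 64-bit code path.
 *
 * @code
 *  uint16_t const u16 = 0xBEEF;
 *  uint64_t const u64 = u16;   // 0x000000000000BEEF - no sign extension
 * @endcode
 */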
2681
2682
2683#ifndef IEM_WITH_SETJMP
2684/**
2685 * Fetches the next signed word from the opcode stream.
2686 *
2687 * @returns Strict VBox status code.
2688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2689 * @param pi16 Where to return the signed word.
2690 */
2691DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2692{
2693 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2694}
2695#endif /* !IEM_WITH_SETJMP */
2696
2697
2698/**
2699 * Fetches the next signed word from the opcode stream, returning automatically
2700 * on failure.
2701 *
2702 * @param a_pi16 Where to return the signed word.
2703 * @remark Implicitly references pVCpu.
2704 */
2705#ifndef IEM_WITH_SETJMP
2706# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2707 do \
2708 { \
2709 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2710 if (rcStrict2 != VINF_SUCCESS) \
2711 return rcStrict2; \
2712 } while (0)
2713#else
2714# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2715#endif
2716
2717#ifndef IEM_WITH_SETJMP
2718
2719/**
2720 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2721 *
2722 * @returns Strict VBox status code.
2723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2724 * @param pu32 Where to return the opcode dword.
2725 */
2726DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2727{
2728 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2729 if (rcStrict == VINF_SUCCESS)
2730 {
2731 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2732# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2733 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2734# else
2735 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2736 pVCpu->iem.s.abOpcode[offOpcode + 1],
2737 pVCpu->iem.s.abOpcode[offOpcode + 2],
2738 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2739# endif
2740 pVCpu->iem.s.offOpcode = offOpcode + 4;
2741 }
2742 else
2743 *pu32 = 0;
2744 return rcStrict;
2745}
2746
2747
2748/**
2749 * Fetches the next opcode dword.
2750 *
2751 * @returns Strict VBox status code.
2752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2753 * @param pu32 Where to return the opcode double word.
2754 */
2755DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2756{
2757 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2758 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2759 {
2760 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2761# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2762 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2763# else
2764 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2765 pVCpu->iem.s.abOpcode[offOpcode + 1],
2766 pVCpu->iem.s.abOpcode[offOpcode + 2],
2767 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2768# endif
2769 return VINF_SUCCESS;
2770 }
2771 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2772}
2773
2774#else /* IEM_WITH_SETJMP */
2775
2776/**
2777 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2778 *
2779 * @returns The opcode dword.
2780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2781 */
2782DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2783{
2784# ifdef IEM_WITH_CODE_TLB
2785 uint32_t u32;
2786 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2787 return u32;
2788# else
2789 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2790 if (rcStrict == VINF_SUCCESS)
2791 {
2792 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2793 pVCpu->iem.s.offOpcode = offOpcode + 4;
2794# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2795 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2796# else
2797 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2798 pVCpu->iem.s.abOpcode[offOpcode + 1],
2799 pVCpu->iem.s.abOpcode[offOpcode + 2],
2800 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2801# endif
2802 }
2803 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2804# endif
2805}
2806
2807
2808/**
2809 * Fetches the next opcode dword, longjmp on error.
2810 *
2811 * @returns The opcode dword.
2812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2813 */
2814DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2815{
2816# ifdef IEM_WITH_CODE_TLB
2817 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2818 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2819 if (RT_LIKELY( pbBuf != NULL
2820 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2821 {
2822 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2823# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2824 return *(uint32_t const *)&pbBuf[offBuf];
2825# else
2826 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2827 pbBuf[offBuf + 1],
2828 pbBuf[offBuf + 2],
2829 pbBuf[offBuf + 3]);
2830# endif
2831 }
2832# else
2833 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2834 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2835 {
2836 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2837# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2838 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2839# else
2840 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2841 pVCpu->iem.s.abOpcode[offOpcode + 1],
2842 pVCpu->iem.s.abOpcode[offOpcode + 2],
2843 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2844# endif
2845 }
2846# endif
2847 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2848}
2849
2850#endif /* IEM_WITH_SETJMP */
2851
2852
2853/**
2854 * Fetches the next opcode dword, returns automatically on failure.
2855 *
2856 * @param a_pu32 Where to return the opcode dword.
2857 * @remark Implicitly references pVCpu.
2858 */
2859#ifndef IEM_WITH_SETJMP
2860# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2861 do \
2862 { \
2863 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2864 if (rcStrict2 != VINF_SUCCESS) \
2865 return rcStrict2; \
2866 } while (0)
2867#else
2868# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2869#endif
2870
2871#ifndef IEM_WITH_SETJMP
2872
2873/**
2874 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2875 *
2876 * @returns Strict VBox status code.
2877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2878 * @param pu64 Where to return the opcode quad word.
2879 */
2880DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2881{
2882 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2883 if (rcStrict == VINF_SUCCESS)
2884 {
2885 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2886 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2887 pVCpu->iem.s.abOpcode[offOpcode + 1],
2888 pVCpu->iem.s.abOpcode[offOpcode + 2],
2889 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2890 pVCpu->iem.s.offOpcode = offOpcode + 4;
2891 }
2892 else
2893 *pu64 = 0;
2894 return rcStrict;
2895}
2896
2897
2898/**
2899 * Fetches the next opcode dword, zero extending it to a quad word.
2900 *
2901 * @returns Strict VBox status code.
2902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2903 * @param pu64 Where to return the opcode quad word.
2904 */
2905DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2906{
2907 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2908 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2909 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2910
2911 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2912 pVCpu->iem.s.abOpcode[offOpcode + 1],
2913 pVCpu->iem.s.abOpcode[offOpcode + 2],
2914 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2915 pVCpu->iem.s.offOpcode = offOpcode + 4;
2916 return VINF_SUCCESS;
2917}
2918
2919#endif /* !IEM_WITH_SETJMP */
2920
2921
2922/**
2923 * Fetches the next opcode dword and zero extends it to a quad word, returns
2924 * automatically on failure.
2925 *
2926 * @param a_pu64 Where to return the opcode quad word.
2927 * @remark Implicitly references pVCpu.
2928 */
2929#ifndef IEM_WITH_SETJMP
2930# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2931 do \
2932 { \
2933 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2934 if (rcStrict2 != VINF_SUCCESS) \
2935 return rcStrict2; \
2936 } while (0)
2937#else
2938# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2939#endif
2940
2941
2942#ifndef IEM_WITH_SETJMP
2943/**
2944 * Fetches the next signed double word from the opcode stream.
2945 *
2946 * @returns Strict VBox status code.
2947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2948 * @param pi32 Where to return the signed double word.
2949 */
2950DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2951{
2952 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2953}
2954#endif
2955
2956/**
2957 * Fetches the next signed double word from the opcode stream, returning
2958 * automatically on failure.
2959 *
2960 * @param a_pi32 Where to return the signed double word.
2961 * @remark Implicitly references pVCpu.
2962 */
2963#ifndef IEM_WITH_SETJMP
2964# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2965 do \
2966 { \
2967 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2968 if (rcStrict2 != VINF_SUCCESS) \
2969 return rcStrict2; \
2970 } while (0)
2971#else
2972# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2973#endif
2974
2975#ifndef IEM_WITH_SETJMP
2976
2977/**
2978 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2979 *
2980 * @returns Strict VBox status code.
2981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2982 * @param pu64 Where to return the opcode qword.
2983 */
2984DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2985{
2986 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2987 if (rcStrict == VINF_SUCCESS)
2988 {
2989 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2990 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2991 pVCpu->iem.s.abOpcode[offOpcode + 1],
2992 pVCpu->iem.s.abOpcode[offOpcode + 2],
2993 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2994 pVCpu->iem.s.offOpcode = offOpcode + 4;
2995 }
2996 else
2997 *pu64 = 0;
2998 return rcStrict;
2999}
3000
3001
3002/**
3003 * Fetches the next opcode dword, sign extending it into a quad word.
3004 *
3005 * @returns Strict VBox status code.
3006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3007 * @param pu64 Where to return the opcode quad word.
3008 */
3009DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3010{
3011 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3012 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3013 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3014
3015 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3016 pVCpu->iem.s.abOpcode[offOpcode + 1],
3017 pVCpu->iem.s.abOpcode[offOpcode + 2],
3018 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3019 *pu64 = i32;
3020 pVCpu->iem.s.offOpcode = offOpcode + 4;
3021 return VINF_SUCCESS;
3022}
3023
3024#endif /* !IEM_WITH_SETJMP */
3025
3026
3027/**
3028 * Fetches the next opcode double word and sign extends it to a quad word,
3029 * returns automatically on failure.
3030 *
3031 * @param a_pu64 Where to return the opcode quad word.
3032 * @remark Implicitly references pVCpu.
3033 */
3034#ifndef IEM_WITH_SETJMP
3035# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3036 do \
3037 { \
3038 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3039 if (rcStrict2 != VINF_SUCCESS) \
3040 return rcStrict2; \
3041 } while (0)
3042#else
3043# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3044#endif
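
/*
 * Worked example (illustrative only): 32-bit immediates and displacements are
 * typically sign-extended to 64 bits in long mode, which is exactly what the
 * S32_SX_U64 fetchers above provide.
 *
 * @code
 *  uint32_t const u32 = UINT32_C(0x80000000);
 *  uint64_t const u64 = (uint64_t)(int64_t)(int32_t)u32;  // 0xFFFFFFFF80000000
 * @endcode
 */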
3045
3046#ifndef IEM_WITH_SETJMP
3047
3048/**
3049 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3050 *
3051 * @returns Strict VBox status code.
3052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3053 * @param pu64 Where to return the opcode qword.
3054 */
3055DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3056{
3057 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3058 if (rcStrict == VINF_SUCCESS)
3059 {
3060 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3061# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3062 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3063# else
3064 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3065 pVCpu->iem.s.abOpcode[offOpcode + 1],
3066 pVCpu->iem.s.abOpcode[offOpcode + 2],
3067 pVCpu->iem.s.abOpcode[offOpcode + 3],
3068 pVCpu->iem.s.abOpcode[offOpcode + 4],
3069 pVCpu->iem.s.abOpcode[offOpcode + 5],
3070 pVCpu->iem.s.abOpcode[offOpcode + 6],
3071 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3072# endif
3073 pVCpu->iem.s.offOpcode = offOpcode + 8;
3074 }
3075 else
3076 *pu64 = 0;
3077 return rcStrict;
3078}
3079
3080
3081/**
3082 * Fetches the next opcode qword.
3083 *
3084 * @returns Strict VBox status code.
3085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3086 * @param pu64 Where to return the opcode qword.
3087 */
3088DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3089{
3090 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3091 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3092 {
3093# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3094 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3095# else
3096 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3097 pVCpu->iem.s.abOpcode[offOpcode + 1],
3098 pVCpu->iem.s.abOpcode[offOpcode + 2],
3099 pVCpu->iem.s.abOpcode[offOpcode + 3],
3100 pVCpu->iem.s.abOpcode[offOpcode + 4],
3101 pVCpu->iem.s.abOpcode[offOpcode + 5],
3102 pVCpu->iem.s.abOpcode[offOpcode + 6],
3103 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3104# endif
3105 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3106 return VINF_SUCCESS;
3107 }
3108 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3109}
3110
3111#else /* IEM_WITH_SETJMP */
3112
3113/**
3114 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3115 *
3116 * @returns The opcode qword.
3117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3118 */
3119DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3120{
3121# ifdef IEM_WITH_CODE_TLB
3122 uint64_t u64;
3123 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3124 return u64;
3125# else
3126 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3127 if (rcStrict == VINF_SUCCESS)
3128 {
3129 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3130 pVCpu->iem.s.offOpcode = offOpcode + 8;
3131# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3132 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3133# else
3134 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3135 pVCpu->iem.s.abOpcode[offOpcode + 1],
3136 pVCpu->iem.s.abOpcode[offOpcode + 2],
3137 pVCpu->iem.s.abOpcode[offOpcode + 3],
3138 pVCpu->iem.s.abOpcode[offOpcode + 4],
3139 pVCpu->iem.s.abOpcode[offOpcode + 5],
3140 pVCpu->iem.s.abOpcode[offOpcode + 6],
3141 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3142# endif
3143 }
3144 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3145# endif
3146}
3147
3148
3149/**
3150 * Fetches the next opcode qword, longjmp on error.
3151 *
3152 * @returns The opcode qword.
3153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3154 */
3155DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3156{
3157# ifdef IEM_WITH_CODE_TLB
3158 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3159 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3160 if (RT_LIKELY( pbBuf != NULL
3161 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3162 {
3163 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3164# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3165 return *(uint64_t const *)&pbBuf[offBuf];
3166# else
3167 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3168 pbBuf[offBuf + 1],
3169 pbBuf[offBuf + 2],
3170 pbBuf[offBuf + 3],
3171 pbBuf[offBuf + 4],
3172 pbBuf[offBuf + 5],
3173 pbBuf[offBuf + 6],
3174 pbBuf[offBuf + 7]);
3175# endif
3176 }
3177# else
3178 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3179 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3180 {
3181 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3182# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3183 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3184# else
3185 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3186 pVCpu->iem.s.abOpcode[offOpcode + 1],
3187 pVCpu->iem.s.abOpcode[offOpcode + 2],
3188 pVCpu->iem.s.abOpcode[offOpcode + 3],
3189 pVCpu->iem.s.abOpcode[offOpcode + 4],
3190 pVCpu->iem.s.abOpcode[offOpcode + 5],
3191 pVCpu->iem.s.abOpcode[offOpcode + 6],
3192 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3193# endif
3194 }
3195# endif
3196 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3197}
3198
3199#endif /* IEM_WITH_SETJMP */
3200
3201/**
3202 * Fetches the next opcode quad word, returns automatically on failure.
3203 *
3204 * @param a_pu64 Where to return the opcode quad word.
3205 * @remark Implicitly references pVCpu.
3206 */
3207#ifndef IEM_WITH_SETJMP
3208# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3209 do \
3210 { \
3211 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3212 if (rcStrict2 != VINF_SUCCESS) \
3213 return rcStrict2; \
3214 } while (0)
3215#else
3216# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3217#endif
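
/*
 * Illustrative usage sketch for the opcode fetch macros above (hypothetical
 * decoder snippet, not taken from the instruction tables): the !IEM_WITH_SETJMP
 * variants return from the calling function on a fetch failure, so they may
 * only be used in functions that return VBOXSTRICTRC and have pVCpu in scope.
 *
 *          uint64_t u64Imm;
 *          IEM_OPCODE_GET_NEXT_U64(&u64Imm);               // qword immediate
 *          uint64_t u64Disp;
 *          IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Disp);       // sign-extended dword
 */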
3218
3219
3220/** @name Misc Worker Functions.
3221 * @{
3222 */
3223
3224/**
3225 * Gets the exception class for the specified exception vector.
3226 *
3227 * @returns The class of the specified exception.
3228 * @param uVector The exception vector.
3229 */
3230IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3231{
3232 Assert(uVector <= X86_XCPT_LAST);
3233 switch (uVector)
3234 {
3235 case X86_XCPT_DE:
3236 case X86_XCPT_TS:
3237 case X86_XCPT_NP:
3238 case X86_XCPT_SS:
3239 case X86_XCPT_GP:
3240 case X86_XCPT_SX: /* AMD only */
3241 return IEMXCPTCLASS_CONTRIBUTORY;
3242
3243 case X86_XCPT_PF:
3244 case X86_XCPT_VE: /* Intel only */
3245 return IEMXCPTCLASS_PAGE_FAULT;
3246 }
3247 return IEMXCPTCLASS_BENIGN;
3248}
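
/*
 * Example classifications per the switch above (illustrative):
 *      iemGetXcptClass(X86_XCPT_GP) -> IEMXCPTCLASS_CONTRIBUTORY
 *      iemGetXcptClass(X86_XCPT_PF) -> IEMXCPTCLASS_PAGE_FAULT
 *      iemGetXcptClass(X86_XCPT_DB) -> IEMXCPTCLASS_BENIGN
 */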
3249
3250
3251/**
3252 * Evaluates how to handle an exception caused during delivery of another event
3253 * (exception / interrupt).
3254 *
3255 * @returns How to handle the recursive exception.
3256 * @param pVCpu The cross context virtual CPU structure of the
3257 * calling thread.
3258 * @param fPrevFlags The flags of the previous event.
3259 * @param uPrevVector The vector of the previous event.
3260 * @param fCurFlags The flags of the current exception.
3261 * @param uCurVector The vector of the current exception.
3262 * @param pfXcptRaiseInfo Where to store additional information about the
3263 * exception condition. Optional.
3264 */
3265VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3266 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3267{
3268 /*
3269 * Only CPU exceptions can be raised while delivering other events; exceptions generated by software
3270 * interrupts (INTn/INT3/INTO/ICEBP) cannot occur as the current (second) exception.
3271 */
3272 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3273 Assert(pVCpu); RT_NOREF(pVCpu);
3274
3275 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3276 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3277 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3278 {
3279 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3280 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3281 {
3282 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3283 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3284 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3285 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3286 {
3287 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3288 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3289 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3290 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3291 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3292 }
3293 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3294 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3295 {
3296 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3297 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%u uCurVector=%u -> #DF\n", uPrevVector, uCurVector));
3298 }
3299 else if ( uPrevVector == X86_XCPT_DF
3300 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3301 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3302 {
3303 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3304 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3305 }
3306 }
3307 else
3308 {
3309 if (uPrevVector == X86_XCPT_NMI)
3310 {
3311 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3312 if (uCurVector == X86_XCPT_PF)
3313 {
3314 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3315 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3316 }
3317 }
3318 else if ( uPrevVector == X86_XCPT_AC
3319 && uCurVector == X86_XCPT_AC)
3320 {
3321 enmRaise = IEMXCPTRAISE_CPU_HANG;
3322 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3323 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3324 }
3325 }
3326 }
3327 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3328 {
3329 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3330 if (uCurVector == X86_XCPT_PF)
3331 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3332 }
3333 else
3334 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3335
3336 if (pfXcptRaiseInfo)
3337 *pfXcptRaiseInfo = fRaiseInfo;
3338 return enmRaise;
3339}
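
/*
 * Worked example (sketch; the surrounding caller code is hypothetical): a #GP
 * raised while delivering a #PF pairs a page-fault class event with a
 * contributory one, so the logic above reports a double fault:
 *
 *      IEMXCPTRAISEINFO fRaiseInfo;
 *      IEMXCPTRAISE     enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                           &fRaiseInfo);
 *      // enmRaise   == IEMXCPTRAISE_DOUBLE_FAULT
 *      // fRaiseInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT
 */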
3340
3341
3342/**
3343 * Enters the CPU shutdown state initiated by a triple fault or other
3344 * unrecoverable condition.
3345 *
3346 * @returns Strict VBox status code.
3347 * @param pVCpu The cross context virtual CPU structure of the
3348 * calling thread.
3349 */
3350IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3351{
3352 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3353 {
3354 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3355 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3356 }
3357
3358 RT_NOREF(pVCpu);
3359 return VINF_EM_TRIPLE_FAULT;
3360}
3361
3362
3363#ifdef VBOX_WITH_NESTED_HWVIRT
3364IEM_STATIC VBOXSTRICTRC iemHandleSvmNstGstEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
3365 uint32_t uErr, uint64_t uCr2)
3366{
3367 Assert(IEM_IS_SVM_ENABLED(pVCpu));
3368
3369 /*
3370 * Handle nested-guest SVM exception and software interrupt intercepts,
3371 * see AMD spec. 15.12 "Exception Intercepts".
3372 *
3373 * - NMI intercepts have their own exit code and do not cause SVM_EXIT_EXCEPTION_2 #VMEXITs.
3374 * - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
3375 * even when they use a vector in the range 0 to 31.
3376 * - ICEBP does not trigger the #DB intercept, but rather its own dedicated intercept.
3377 * - For #PF exceptions, the intercept is checked before the exception writes CR2.
3378 */
3379 /* Check NMI intercept */
3380 if ( u8Vector == X86_XCPT_NMI
3381 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3382 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
3383 {
3384 Log2(("iemHandleSvmNstGstEventIntercept: NMI intercept -> #VMEXIT\n"));
3385 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3386 }
3387
3388 /* Check ICEBP intercept. */
3389 if ( (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
3390 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_ICEBP))
3391 {
3392 Log2(("iemHandleSvmNstGstEventIntercept: ICEBP intercept -> #VMEXIT\n"));
3393 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_ICEBP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3394 }
3395
3396 /* Check CPU exception intercepts. */
3397 if ( (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3398 && IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, u8Vector))
3399 {
3400 Assert(u8Vector <= X86_XCPT_LAST);
3401 uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
3402 uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
3403 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist
3404 && u8Vector == X86_XCPT_PF
3405 && !(uErr & X86_TRAP_PF_ID))
3406 {
3407 /** @todo Nested-guest SVM - figure out fetching op-code bytes from IEM. */
3408#ifdef IEM_WITH_CODE_TLB
3409#else
3410 uint8_t const offOpCode = pVCpu->iem.s.offOpcode;
3411 uint8_t const cbCurrent = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode;
3412 if ( cbCurrent > 0
3413 && cbCurrent < sizeof(pCtx->hwvirt.svm.VmcbCtrl.abInstr))
3414 {
3415 Assert(cbCurrent <= sizeof(pVCpu->iem.s.abOpcode));
3416 memcpy(&pCtx->hwvirt.svm.VmcbCtrl.abInstr[0], &pVCpu->iem.s.abOpcode[offOpCode], cbCurrent);
3417 }
3418#endif
3419 }
3420 Log2(("iemHandleSvmNstGstEventIntercept: Xcpt intercept. u8Vector=%#x uExitInfo1=%#RX64, uExitInfo2=%#RX64 -> #VMEXIT\n",
3421 u8Vector, uExitInfo1, uExitInfo2));
3422 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + u8Vector, uExitInfo1, uExitInfo2);
3423 }
3424
3425 /* Check software interrupt (INTn) intercepts. */
3426 if ( (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3427 | IEM_XCPT_FLAGS_BP_INSTR
3428 | IEM_XCPT_FLAGS_ICEBP_INSTR
3429 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3430 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN))
3431 {
3432 uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? u8Vector : 0;
3433 Log2(("iemHandleSvmNstGstEventIntercept: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
3434 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
3435 }
3436
3437 return VINF_HM_INTERCEPT_NOT_ACTIVE;
3438}
3439#endif
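
/*
 * Illustrative sketch of the exception intercept path above (hypothetical
 * values): a nested-guest #PF whose exception intercept bit is set, raised with
 * fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
 * results in a #VMEXIT with exit code SVM_EXIT_EXCEPTION_0 + X86_XCPT_PF,
 * uExitInfo1 = uErr and uExitInfo2 = uCr2.  Guest CR2 itself is not updated
 * first, since the intercept is checked before the exception writes CR2.
 */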
3440
3441/**
3442 * Validates a new SS segment.
3443 *
3444 * @returns VBox strict status code.
3445 * @param pVCpu The cross context virtual CPU structure of the
3446 * calling thread.
3447 * @param pCtx The CPU context.
3448 * @param NewSS The new SS selector.
3449 * @param uCpl The CPL to load the stack for.
3450 * @param pDesc Where to return the descriptor.
3451 */
3452IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3453{
3454 NOREF(pCtx);
3455
3456 /* Null selectors are not allowed (we're not called for dispatching
3457 interrupts with SS=0 in long mode). */
3458 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3459 {
3460 Log(("iemMiscValidateNewSS: %#x - null selector -> #TS(0)\n", NewSS));
3461 return iemRaiseTaskSwitchFault0(pVCpu);
3462 }
3463
3464 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3465 if ((NewSS & X86_SEL_RPL) != uCpl)
3466 {
3467 Log(("iemMiscValidateNewSS: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
3468 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3469 }
3470
3471 /*
3472 * Read the descriptor.
3473 */
3474 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3475 if (rcStrict != VINF_SUCCESS)
3476 return rcStrict;
3477
3478 /*
3479 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3480 */
3481 if (!pDesc->Legacy.Gen.u1DescType)
3482 {
3483 Log(("iemMiscValidateNewSS: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3484 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3485 }
3486
3487 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3488 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3489 {
3490 Log(("iemMiscValidateNewSS: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3491 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3492 }
3493 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3494 {
3495 Log(("iemMiscValidateNewSS: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3496 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3497 }
3498
3499 /* Is it there? */
3500 /** @todo testcase: Is this checked before the canonical / limit check below? */
3501 if (!pDesc->Legacy.Gen.u1Present)
3502 {
3503 Log(("iemMiscValidateNewSS: %#x - segment not present -> #NP\n", NewSS));
3504 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3505 }
3506
3507 return VINF_SUCCESS;
3508}
3509
3510
3511/**
3512 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3513 * not.
3514 *
3515 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3516 * @param a_pCtx The CPU context.
3517 */
3518#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3519# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3520 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3521 ? (a_pCtx)->eflags.u \
3522 : CPUMRawGetEFlags(a_pVCpu) )
3523#else
3524# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3525 ( (a_pCtx)->eflags.u )
3526#endif
3527
3528/**
3529 * Updates the EFLAGS in the correct manner wrt. PATM.
3530 *
3531 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3532 * @param a_pCtx The CPU context.
3533 * @param a_fEfl The new EFLAGS.
3534 */
3535#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3536# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3537 do { \
3538 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3539 (a_pCtx)->eflags.u = (a_fEfl); \
3540 else \
3541 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3542 } while (0)
3543#else
3544# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3545 do { \
3546 (a_pCtx)->eflags.u = (a_fEfl); \
3547 } while (0)
3548#endif
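
/*
 * Typical read-modify-write sketch using the wrappers above (this mirrors what
 * the real-mode interrupt dispatching further down in this file does):
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;                    // or whatever flag surgery is needed
 *      IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 *
 * Going through the wrappers keeps PATM's partial EFLAGS ownership intact in
 * raw-mode ring-3 builds.
 */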
3549
3550
3551/** @} */
3552
3553/** @name Raising Exceptions.
3554 *
3555 * @{
3556 */
3557
3558
3559/**
3560 * Loads the specified stack far pointer from the TSS.
3561 *
3562 * @returns VBox strict status code.
3563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3564 * @param pCtx The CPU context.
3565 * @param uCpl The CPL to load the stack for.
3566 * @param pSelSS Where to return the new stack segment.
3567 * @param puEsp Where to return the new stack pointer.
3568 */
3569IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3570 PRTSEL pSelSS, uint32_t *puEsp)
3571{
3572 VBOXSTRICTRC rcStrict;
3573 Assert(uCpl < 4);
3574
3575 switch (pCtx->tr.Attr.n.u4Type)
3576 {
3577 /*
3578 * 16-bit TSS (X86TSS16).
3579 */
3580 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); /* fall thru */
3581 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3582 {
3583 uint32_t off = uCpl * 4 + 2;
3584 if (off + 4 <= pCtx->tr.u32Limit)
3585 {
3586 /** @todo check actual access pattern here. */
3587 uint32_t u32Tmp = 0; /* gcc maybe... */
3588 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3589 if (rcStrict == VINF_SUCCESS)
3590 {
3591 *puEsp = RT_LOWORD(u32Tmp);
3592 *pSelSS = RT_HIWORD(u32Tmp);
3593 return VINF_SUCCESS;
3594 }
3595 }
3596 else
3597 {
3598 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3599 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3600 }
3601 break;
3602 }
3603
3604 /*
3605 * 32-bit TSS (X86TSS32).
3606 */
3607 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); /* fall thru */
3608 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3609 {
3610 uint32_t off = uCpl * 8 + 4;
3611 if (off + 7 <= pCtx->tr.u32Limit)
3612 {
3613 /** @todo check actual access pattern here. */
3614 uint64_t u64Tmp;
3615 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3616 if (rcStrict == VINF_SUCCESS)
3617 {
3618 *puEsp = u64Tmp & UINT32_MAX;
3619 *pSelSS = (RTSEL)(u64Tmp >> 32);
3620 return VINF_SUCCESS;
3621 }
3622 }
3623 else
3624 {
3625 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3626 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3627 }
3628 break;
3629 }
3630
3631 default:
3632 AssertFailed();
3633 rcStrict = VERR_IEM_IPE_4;
3634 break;
3635 }
3636
3637 *puEsp = 0; /* make gcc happy */
3638 *pSelSS = 0; /* make gcc happy */
3639 return rcStrict;
3640}
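
/*
 * Worked example for the offset math above (follows directly from the X86TSS16
 * and X86TSS32 layouts): for uCpl=1 on a 32-bit TSS the qword is fetched from
 * offset 1*8 + 4 = 12, i.e. esp1 in the low dword and ss1 in the high dword;
 * on a 16-bit TSS the dword is fetched from offset 1*4 + 2 = 6, i.e. sp1 in
 * the low word and ss1 in the high word.
 */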
3641
3642
3643/**
3644 * Loads the specified stack pointer from the 64-bit TSS.
3645 *
3646 * @returns VBox strict status code.
3647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3648 * @param pCtx The CPU context.
3649 * @param uCpl The CPL to load the stack for.
3650 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3651 * @param puRsp Where to return the new stack pointer.
3652 */
3653IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3654{
3655 Assert(uCpl < 4);
3656 Assert(uIst < 8);
3657 *puRsp = 0; /* make gcc happy */
3658
3659 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3660
3661 uint32_t off;
3662 if (uIst)
3663 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3664 else
3665 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3666 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3667 {
3668 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3669 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3670 }
3671
3672 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3673}
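
/*
 * Worked example for the offset selection above: uIst=3 reads the qword at
 * RT_OFFSETOF(X86TSS64, ist1) + 2*8 (i.e. ist3), while uIst=0 with uCpl=2
 * reads the qword at RT_OFFSETOF(X86TSS64, rsp0) + 2*8 (i.e. rsp2); the offset
 * is range checked against TR.limit before the actual fetch.
 */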
3674
3675
3676/**
3677 * Adjust the CPU state according to the exception being raised.
3678 *
3679 * @param pCtx The CPU context.
3680 * @param u8Vector The exception that has been raised.
3681 */
3682DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3683{
3684 switch (u8Vector)
3685 {
3686 case X86_XCPT_DB:
3687 pCtx->dr[7] &= ~X86_DR7_GD;
3688 break;
3689 /** @todo Read the AMD and Intel exception reference... */
3690 }
3691}
3692
3693
3694/**
3695 * Implements exceptions and interrupts for real mode.
3696 *
3697 * @returns VBox strict status code.
3698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3699 * @param pCtx The CPU context.
3700 * @param cbInstr The number of bytes to offset rIP by in the return
3701 * address.
3702 * @param u8Vector The interrupt / exception vector number.
3703 * @param fFlags The flags.
3704 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3705 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3706 */
3707IEM_STATIC VBOXSTRICTRC
3708iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3709 PCPUMCTX pCtx,
3710 uint8_t cbInstr,
3711 uint8_t u8Vector,
3712 uint32_t fFlags,
3713 uint16_t uErr,
3714 uint64_t uCr2)
3715{
3716 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3717 NOREF(uErr); NOREF(uCr2);
3718
3719 /*
3720 * Read the IDT entry.
3721 */
3722 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3723 {
3724 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3725 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3726 }
3727 RTFAR16 Idte;
3728 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3729 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3730 return rcStrict;
3731
3732 /*
3733 * Push the stack frame.
3734 */
3735 uint16_t *pu16Frame;
3736 uint64_t uNewRsp;
3737 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3738 if (rcStrict != VINF_SUCCESS)
3739 return rcStrict;
3740
3741 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3742#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3743 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3744 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3745 fEfl |= UINT16_C(0xf000);
3746#endif
3747 pu16Frame[2] = (uint16_t)fEfl;
3748 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3749 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3750 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3751 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3752 return rcStrict;
3753
3754 /*
3755 * Load the vector address into cs:ip and make exception specific state
3756 * adjustments.
3757 */
3758 pCtx->cs.Sel = Idte.sel;
3759 pCtx->cs.ValidSel = Idte.sel;
3760 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3761 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3762 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3763 pCtx->rip = Idte.off;
3764 fEfl &= ~X86_EFL_IF;
3765 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3766
3767 /** @todo do we actually do this in real mode? */
3768 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3769 iemRaiseXcptAdjustState(pCtx, u8Vector);
3770
3771 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3772}
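
/*
 * Sketch of the resulting 16-bit stack frame and vector fetch (relative to the
 * pre-push SP):
 *
 *      SS:SP-2  FLAGS   (top four bits forced to 1 for the 8086/V20 targets)
 *      SS:SP-4  CS
 *      SS:SP-6  IP      (IP + cbInstr for software interrupts)
 *
 * The new CS:IP is taken from the 4-byte IVT entry at IDTR.base + 4*vector
 * (offset in the low word, segment in the high word), and EFLAGS.IF is cleared
 * afterwards.
 */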
3773
3774
3775/**
3776 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3777 *
3778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3779 * @param pSReg Pointer to the segment register.
3780 */
3781IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3782{
3783 pSReg->Sel = 0;
3784 pSReg->ValidSel = 0;
3785 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3786 {
3787 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */
3788 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3789 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3790 }
3791 else
3792 {
3793 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3794 /** @todo check this on AMD-V */
3795 pSReg->u64Base = 0;
3796 pSReg->u32Limit = 0;
3797 }
3798}
3799
3800
3801/**
3802 * Loads a segment selector during a task switch in V8086 mode.
3803 *
3804 * @param pSReg Pointer to the segment register.
3805 * @param uSel The selector value to load.
3806 */
3807IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3808{
3809 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3810 pSReg->Sel = uSel;
3811 pSReg->ValidSel = uSel;
3812 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3813 pSReg->u64Base = uSel << 4;
3814 pSReg->u32Limit = 0xffff;
3815 pSReg->Attr.u = 0xf3;
3816}
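
/*
 * Worked example for the loading above: uSel=0x1234 yields u64Base=0x12340,
 * u32Limit=0xffff and Attr.u=0xf3 (present, DPL=3, accessed read/write data),
 * i.e. the fixed segment shape used in virtual-8086 mode.
 */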
3817
3818
3819/**
3820 * Loads a NULL data selector into a selector register, both the hidden and
3821 * visible parts, in protected mode.
3822 *
3823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3824 * @param pSReg Pointer to the segment register.
3825 * @param uRpl The RPL.
3826 */
3827IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3828{
3829 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3830 * data selector in protected mode. */
3831 pSReg->Sel = uRpl;
3832 pSReg->ValidSel = uRpl;
3833 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3834 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3835 {
3836 /* VT-x (Intel 3960x) observed doing something like this. */
3837 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3838 pSReg->u32Limit = UINT32_MAX;
3839 pSReg->u64Base = 0;
3840 }
3841 else
3842 {
3843 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3844 pSReg->u32Limit = 0;
3845 pSReg->u64Base = 0;
3846 }
3847}
3848
3849
3850/**
3851 * Loads a segment selector during a task switch in protected mode.
3852 *
3853 * In this task switch scenario, we would throw \#TS exceptions rather than
3854 * \#GPs.
3855 *
3856 * @returns VBox strict status code.
3857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3858 * @param pSReg Pointer to the segment register.
3859 * @param uSel The new selector value.
3860 *
3861 * @remarks This does _not_ handle CS or SS.
3862 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3863 */
3864IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3865{
3866 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3867
3868 /* Null data selector. */
3869 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3870 {
3871 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3872 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3873 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3874 return VINF_SUCCESS;
3875 }
3876
3877 /* Fetch the descriptor. */
3878 IEMSELDESC Desc;
3879 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3880 if (rcStrict != VINF_SUCCESS)
3881 {
3882 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3883 VBOXSTRICTRC_VAL(rcStrict)));
3884 return rcStrict;
3885 }
3886
3887 /* Must be a data segment or readable code segment. */
3888 if ( !Desc.Legacy.Gen.u1DescType
3889 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3890 {
3891 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3892 Desc.Legacy.Gen.u4Type));
3893 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3894 }
3895
3896 /* Check privileges for data segments and non-conforming code segments. */
3897 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3898 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3899 {
3900 /* The RPL and the new CPL must be less than or equal to the DPL. */
3901 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3902 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3903 {
3904 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3905 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3906 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3907 }
3908 }
3909
3910 /* Is it there? */
3911 if (!Desc.Legacy.Gen.u1Present)
3912 {
3913 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3914 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3915 }
3916
3917 /* The base and limit. */
3918 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3919 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3920
3921 /*
3922 * Ok, everything checked out fine. Now set the accessed bit before
3923 * committing the result into the registers.
3924 */
3925 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3926 {
3927 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3928 if (rcStrict != VINF_SUCCESS)
3929 return rcStrict;
3930 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3931 }
3932
3933 /* Commit */
3934 pSReg->Sel = uSel;
3935 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3936 pSReg->u32Limit = cbLimit;
3937 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3938 pSReg->ValidSel = uSel;
3939 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3940 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3941 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3942
3943 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3944 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3945 return VINF_SUCCESS;
3946}
3947
3948
3949/**
3950 * Performs a task switch.
3951 *
3952 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3953 * caller is responsible for performing the necessary checks (like DPL, TSS
3954 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3955 * reference for JMP, CALL, IRET.
3956 *
3957 * If the task switch is due to a software interrupt or hardware exception,
3958 * the caller is responsible for validating the TSS selector and descriptor. See
3959 * Intel Instruction reference for INT n.
3960 *
3961 * @returns VBox strict status code.
3962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3963 * @param pCtx The CPU context.
3964 * @param enmTaskSwitch What caused this task switch.
3965 * @param uNextEip The EIP effective after the task switch.
3966 * @param fFlags The flags.
3967 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3968 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3969 * @param SelTSS The TSS selector of the new task.
3970 * @param pNewDescTSS Pointer to the new TSS descriptor.
3971 */
3972IEM_STATIC VBOXSTRICTRC
3973iemTaskSwitch(PVMCPU pVCpu,
3974 PCPUMCTX pCtx,
3975 IEMTASKSWITCH enmTaskSwitch,
3976 uint32_t uNextEip,
3977 uint32_t fFlags,
3978 uint16_t uErr,
3979 uint64_t uCr2,
3980 RTSEL SelTSS,
3981 PIEMSELDESC pNewDescTSS)
3982{
3983 Assert(!IEM_IS_REAL_MODE(pVCpu));
3984 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3985
3986 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3987 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3988 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3989 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3990 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3991
3992 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3993 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3994
3995 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3996 fIsNewTSS386, pCtx->eip, uNextEip));
3997
3998 /* Update CR2 in case it's a page-fault. */
3999 /** @todo This should probably be done much earlier in IEM/PGM. See
4000 * @bugref{5653#c49}. */
4001 if (fFlags & IEM_XCPT_FLAGS_CR2)
4002 pCtx->cr2 = uCr2;
4003
4004 /*
4005 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4006 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4007 */
4008 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4009 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4010 if (uNewTSSLimit < uNewTSSLimitMin)
4011 {
4012 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4013 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4014 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4015 }
4016
4017 /*
4018 * Check the current TSS limit. The last data written to the current TSS during the
4019 * task switch is 2 bytes at offset 0x5C (32-bit TSS) and 1 byte at offset 0x28 (16-bit TSS).
4020 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4021 *
4022 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4023 * end up with smaller than "legal" TSS limits.
4024 */
4025 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
4026 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4027 if (uCurTSSLimit < uCurTSSLimitMin)
4028 {
4029 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4030 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4031 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4032 }
4033
4034 /*
4035 * Verify that the new TSS can be accessed and map it. Map only the required contents
4036 * and not the entire TSS.
4037 */
4038 void *pvNewTSS;
4039 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4040 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4041 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4042 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4043 * not perform correct translation if this happens. See Intel spec. 7.2.1
4044 * "Task-State Segment" */
4045 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4046 if (rcStrict != VINF_SUCCESS)
4047 {
4048 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4049 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4050 return rcStrict;
4051 }
4052
4053 /*
4054 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4055 */
4056 uint32_t u32EFlags = pCtx->eflags.u32;
4057 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4058 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4059 {
4060 PX86DESC pDescCurTSS;
4061 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4062 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4063 if (rcStrict != VINF_SUCCESS)
4064 {
4065 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4066 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4067 return rcStrict;
4068 }
4069
4070 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4071 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4072 if (rcStrict != VINF_SUCCESS)
4073 {
4074 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4075 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4076 return rcStrict;
4077 }
4078
4079 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4080 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4081 {
4082 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4083 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4084 u32EFlags &= ~X86_EFL_NT;
4085 }
4086 }
4087
4088 /*
4089 * Save the CPU state into the current TSS.
4090 */
4091 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4092 if (GCPtrNewTSS == GCPtrCurTSS)
4093 {
4094 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4095 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4096 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4097 }
4098 if (fIsNewTSS386)
4099 {
4100 /*
4101 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4102 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4103 */
4104 void *pvCurTSS32;
4105 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4106 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4107 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4108 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4109 if (rcStrict != VINF_SUCCESS)
4110 {
4111 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4112 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4113 return rcStrict;
4114 }
4115
4116 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4117 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4118 pCurTSS32->eip = uNextEip;
4119 pCurTSS32->eflags = u32EFlags;
4120 pCurTSS32->eax = pCtx->eax;
4121 pCurTSS32->ecx = pCtx->ecx;
4122 pCurTSS32->edx = pCtx->edx;
4123 pCurTSS32->ebx = pCtx->ebx;
4124 pCurTSS32->esp = pCtx->esp;
4125 pCurTSS32->ebp = pCtx->ebp;
4126 pCurTSS32->esi = pCtx->esi;
4127 pCurTSS32->edi = pCtx->edi;
4128 pCurTSS32->es = pCtx->es.Sel;
4129 pCurTSS32->cs = pCtx->cs.Sel;
4130 pCurTSS32->ss = pCtx->ss.Sel;
4131 pCurTSS32->ds = pCtx->ds.Sel;
4132 pCurTSS32->fs = pCtx->fs.Sel;
4133 pCurTSS32->gs = pCtx->gs.Sel;
4134
4135 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4136 if (rcStrict != VINF_SUCCESS)
4137 {
4138 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4139 VBOXSTRICTRC_VAL(rcStrict)));
4140 return rcStrict;
4141 }
4142 }
4143 else
4144 {
4145 /*
4146 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4147 */
4148 void *pvCurTSS16;
4149 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4150 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4151 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4152 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4153 if (rcStrict != VINF_SUCCESS)
4154 {
4155 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4156 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4157 return rcStrict;
4158 }
4159
4160 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4161 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4162 pCurTSS16->ip = uNextEip;
4163 pCurTSS16->flags = u32EFlags;
4164 pCurTSS16->ax = pCtx->ax;
4165 pCurTSS16->cx = pCtx->cx;
4166 pCurTSS16->dx = pCtx->dx;
4167 pCurTSS16->bx = pCtx->bx;
4168 pCurTSS16->sp = pCtx->sp;
4169 pCurTSS16->bp = pCtx->bp;
4170 pCurTSS16->si = pCtx->si;
4171 pCurTSS16->di = pCtx->di;
4172 pCurTSS16->es = pCtx->es.Sel;
4173 pCurTSS16->cs = pCtx->cs.Sel;
4174 pCurTSS16->ss = pCtx->ss.Sel;
4175 pCurTSS16->ds = pCtx->ds.Sel;
4176
4177 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4178 if (rcStrict != VINF_SUCCESS)
4179 {
4180 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4181 VBOXSTRICTRC_VAL(rcStrict)));
4182 return rcStrict;
4183 }
4184 }
4185
4186 /*
4187 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4188 */
4189 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4190 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4191 {
4192 /* Whether it's a 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
4193 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4194 pNewTSS->selPrev = pCtx->tr.Sel;
4195 }
4196
4197 /*
4198 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4199 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4200 */
4201 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4202 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4203 bool fNewDebugTrap;
4204 if (fIsNewTSS386)
4205 {
4206 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4207 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4208 uNewEip = pNewTSS32->eip;
4209 uNewEflags = pNewTSS32->eflags;
4210 uNewEax = pNewTSS32->eax;
4211 uNewEcx = pNewTSS32->ecx;
4212 uNewEdx = pNewTSS32->edx;
4213 uNewEbx = pNewTSS32->ebx;
4214 uNewEsp = pNewTSS32->esp;
4215 uNewEbp = pNewTSS32->ebp;
4216 uNewEsi = pNewTSS32->esi;
4217 uNewEdi = pNewTSS32->edi;
4218 uNewES = pNewTSS32->es;
4219 uNewCS = pNewTSS32->cs;
4220 uNewSS = pNewTSS32->ss;
4221 uNewDS = pNewTSS32->ds;
4222 uNewFS = pNewTSS32->fs;
4223 uNewGS = pNewTSS32->gs;
4224 uNewLdt = pNewTSS32->selLdt;
4225 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4226 }
4227 else
4228 {
4229 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4230 uNewCr3 = 0;
4231 uNewEip = pNewTSS16->ip;
4232 uNewEflags = pNewTSS16->flags;
4233 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4234 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4235 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4236 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4237 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4238 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4239 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4240 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4241 uNewES = pNewTSS16->es;
4242 uNewCS = pNewTSS16->cs;
4243 uNewSS = pNewTSS16->ss;
4244 uNewDS = pNewTSS16->ds;
4245 uNewFS = 0;
4246 uNewGS = 0;
4247 uNewLdt = pNewTSS16->selLdt;
4248 fNewDebugTrap = false;
4249 }
4250
4251 if (GCPtrNewTSS == GCPtrCurTSS)
4252 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4253 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4254
4255 /*
4256 * We're done accessing the new TSS.
4257 */
4258 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4259 if (rcStrict != VINF_SUCCESS)
4260 {
4261 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4262 return rcStrict;
4263 }
4264
4265 /*
4266 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4267 */
4268 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4269 {
4270 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4271 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4272 if (rcStrict != VINF_SUCCESS)
4273 {
4274 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4275 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4276 return rcStrict;
4277 }
4278
4279 /* Check that the descriptor indicates the new TSS is available (not busy). */
4280 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4281 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4282 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4283
4284 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4285 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4286 if (rcStrict != VINF_SUCCESS)
4287 {
4288 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4289 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4290 return rcStrict;
4291 }
4292 }
4293
4294 /*
4295 * From this point on, we're technically in the new task. We will defer exceptions
4296 * until the completion of the task switch but before executing any instructions in the new task.
4297 */
4298 pCtx->tr.Sel = SelTSS;
4299 pCtx->tr.ValidSel = SelTSS;
4300 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4301 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4302 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4303 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4304 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4305
4306 /* Set the busy bit in TR. */
4307 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4308 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4309 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4310 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4311 {
4312 uNewEflags |= X86_EFL_NT;
4313 }
4314
4315 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4316 pCtx->cr0 |= X86_CR0_TS;
4317 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4318
4319 pCtx->eip = uNewEip;
4320 pCtx->eax = uNewEax;
4321 pCtx->ecx = uNewEcx;
4322 pCtx->edx = uNewEdx;
4323 pCtx->ebx = uNewEbx;
4324 pCtx->esp = uNewEsp;
4325 pCtx->ebp = uNewEbp;
4326 pCtx->esi = uNewEsi;
4327 pCtx->edi = uNewEdi;
4328
4329 uNewEflags &= X86_EFL_LIVE_MASK;
4330 uNewEflags |= X86_EFL_RA1_MASK;
4331 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4332
4333 /*
4334 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4335 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3,
4336 * since the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
4337 */
4338 pCtx->es.Sel = uNewES;
4339 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4340
4341 pCtx->cs.Sel = uNewCS;
4342 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4343
4344 pCtx->ss.Sel = uNewSS;
4345 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4346
4347 pCtx->ds.Sel = uNewDS;
4348 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4349
4350 pCtx->fs.Sel = uNewFS;
4351 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4352
4353 pCtx->gs.Sel = uNewGS;
4354 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4355 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4356
4357 pCtx->ldtr.Sel = uNewLdt;
4358 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4359 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4360 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4361
4362 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4363 {
4364 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4365 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4366 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4367 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4368 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4369 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4370 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4371 }
4372
4373 /*
4374 * Switch CR3 for the new task.
4375 */
4376 if ( fIsNewTSS386
4377 && (pCtx->cr0 & X86_CR0_PG))
4378 {
4379 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4380 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4381 {
4382 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4383 AssertRCSuccessReturn(rc, rc);
4384 }
4385 else
4386 pCtx->cr3 = uNewCr3;
4387
4388 /* Inform PGM. */
4389 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4390 {
4391 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4392 AssertRCReturn(rc, rc);
4393 /* ignore informational status codes */
4394 }
4395 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4396 }
4397
4398 /*
4399 * Switch LDTR for the new task.
4400 */
4401 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4402 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4403 else
4404 {
4405 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4406
4407 IEMSELDESC DescNewLdt;
4408 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4409 if (rcStrict != VINF_SUCCESS)
4410 {
4411 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4412 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4413 return rcStrict;
4414 }
4415 if ( !DescNewLdt.Legacy.Gen.u1Present
4416 || DescNewLdt.Legacy.Gen.u1DescType
4417 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4418 {
4419 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4420 uNewLdt, DescNewLdt.Legacy.u));
4421 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4422 }
4423
4424 pCtx->ldtr.ValidSel = uNewLdt;
4425 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4426 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4427 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4428 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4429 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4430 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4431 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4432 }
4433
4434 IEMSELDESC DescSS;
4435 if (IEM_IS_V86_MODE(pVCpu))
4436 {
4437 pVCpu->iem.s.uCpl = 3;
4438 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4439 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4440 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4441 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4442 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4443 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4444
4445 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4446 DescSS.Legacy.u = 0;
4447 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4448 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4449 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4450 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4451 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4452 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4453 DescSS.Legacy.Gen.u2Dpl = 3;
4454 }
4455 else
4456 {
4457 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4458
4459 /*
4460 * Load the stack segment for the new task.
4461 */
4462 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4463 {
4464 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4465 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4466 }
4467
4468 /* Fetch the descriptor. */
4469 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4470 if (rcStrict != VINF_SUCCESS)
4471 {
4472 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4473 VBOXSTRICTRC_VAL(rcStrict)));
4474 return rcStrict;
4475 }
4476
4477 /* SS must be a data segment and writable. */
4478 if ( !DescSS.Legacy.Gen.u1DescType
4479 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4480 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4481 {
4482 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4483 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4484 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4485 }
4486
4487 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4488 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4489 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4490 {
4491 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4492 uNewCpl));
4493 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4494 }
4495
4496 /* Is it there? */
4497 if (!DescSS.Legacy.Gen.u1Present)
4498 {
4499 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4500 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4501 }
4502
4503 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4504 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4505
4506 /* Set the accessed bit before committing the result into SS. */
4507 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4508 {
4509 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4510 if (rcStrict != VINF_SUCCESS)
4511 return rcStrict;
4512 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4513 }
4514
4515 /* Commit SS. */
4516 pCtx->ss.Sel = uNewSS;
4517 pCtx->ss.ValidSel = uNewSS;
4518 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4519 pCtx->ss.u32Limit = cbLimit;
4520 pCtx->ss.u64Base = u64Base;
4521 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4522 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4523
4524 /* CPL has changed, update IEM before loading rest of segments. */
4525 pVCpu->iem.s.uCpl = uNewCpl;
4526
4527 /*
4528 * Load the data segments for the new task.
4529 */
4530 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4531 if (rcStrict != VINF_SUCCESS)
4532 return rcStrict;
4533 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4534 if (rcStrict != VINF_SUCCESS)
4535 return rcStrict;
4536 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4537 if (rcStrict != VINF_SUCCESS)
4538 return rcStrict;
4539 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4540 if (rcStrict != VINF_SUCCESS)
4541 return rcStrict;
4542
4543 /*
4544 * Load the code segment for the new task.
4545 */
4546 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4547 {
4548 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4549 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4550 }
4551
4552 /* Fetch the descriptor. */
4553 IEMSELDESC DescCS;
4554 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4555 if (rcStrict != VINF_SUCCESS)
4556 {
4557 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4558 return rcStrict;
4559 }
4560
4561 /* CS must be a code segment. */
4562 if ( !DescCS.Legacy.Gen.u1DescType
4563 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4564 {
4565 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4566 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4567 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4568 }
4569
4570 /* For conforming CS, DPL must be less than or equal to the RPL. */
4571 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4572 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4573 {
4574 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4575 DescCS.Legacy.Gen.u2Dpl));
4576 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4577 }
4578
4579 /* For non-conforming CS, DPL must match RPL. */
4580 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4581 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4582 {
4583 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4584 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4585 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4586 }
4587
4588 /* Is it there? */
4589 if (!DescCS.Legacy.Gen.u1Present)
4590 {
4591 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4592 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4593 }
4594
4595 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4596 u64Base = X86DESC_BASE(&DescCS.Legacy);
4597
4598 /* Set the accessed bit before committing the result into CS. */
4599 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4600 {
4601 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4602 if (rcStrict != VINF_SUCCESS)
4603 return rcStrict;
4604 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4605 }
4606
4607 /* Commit CS. */
4608 pCtx->cs.Sel = uNewCS;
4609 pCtx->cs.ValidSel = uNewCS;
4610 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4611 pCtx->cs.u32Limit = cbLimit;
4612 pCtx->cs.u64Base = u64Base;
4613 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4614 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4615 }
4616
4617 /** @todo Debug trap. */
4618 if (fIsNewTSS386 && fNewDebugTrap)
4619 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4620
4621 /*
4622 * Construct the error code masks based on what caused this task switch.
4623 * See Intel Instruction reference for INT.
4624 */
4625 uint16_t uExt;
4626 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4627 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4628 {
4629 uExt = 1;
4630 }
4631 else
4632 uExt = 0;
4633
4634 /*
4635 * Push any error code on to the new stack.
4636 */
4637 if (fFlags & IEM_XCPT_FLAGS_ERR)
4638 {
4639 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4640 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4641 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4642
4643 /* Check that there is sufficient space on the stack. */
4644 /** @todo Factor out segment limit checking for normal/expand down segments
4645 * into a separate function. */
4646 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4647 {
4648 if ( pCtx->esp - 1 > cbLimitSS
4649 || pCtx->esp < cbStackFrame)
4650 {
4651 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4652 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4653 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4654 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4655 }
4656 }
4657 else
4658 {
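/* Expand-down stack segment: valid offsets lie above the limit, up to 64KB or 4GB depending on the D/B bit. */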
4659 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4660 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4661 {
4662 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4663 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4664 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4665 }
4666 }
4667
4668
4669 if (fIsNewTSS386)
4670 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4671 else
4672 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4673 if (rcStrict != VINF_SUCCESS)
4674 {
4675 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4676 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4677 return rcStrict;
4678 }
4679 }
4680
4681 /* Check the new EIP against the new CS limit. */
4682 if (pCtx->eip > pCtx->cs.u32Limit)
4683 {
4684 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4685 pCtx->eip, pCtx->cs.u32Limit));
4686 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4687 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4688 }
4689
4690 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4691 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4692}
4693
4694
4695/**
4696 * Implements exceptions and interrupts for protected mode.
4697 *
4698 * @returns VBox strict status code.
4699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4700 * @param pCtx The CPU context.
4701 * @param cbInstr The number of bytes to offset rIP by in the return
4702 * address.
4703 * @param u8Vector The interrupt / exception vector number.
4704 * @param fFlags The flags.
4705 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4706 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4707 */
4708IEM_STATIC VBOXSTRICTRC
4709iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4710 PCPUMCTX pCtx,
4711 uint8_t cbInstr,
4712 uint8_t u8Vector,
4713 uint32_t fFlags,
4714 uint16_t uErr,
4715 uint64_t uCr2)
4716{
4717 /*
4718 * Read the IDT entry.
4719 */
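/* Protected-mode IDT entries are 8 bytes each. */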
4720 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4721 {
4722 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4723 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4724 }
4725 X86DESC Idte;
4726 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4727 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4728 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4729 return rcStrict;
4730 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4731 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4732 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4733
4734 /*
4735 * Check the descriptor type, DPL and such.
4736 * ASSUMES this is done in the same order as described for call-gate calls.
4737 */
4738 if (Idte.Gate.u1DescType)
4739 {
4740 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4741 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4742 }
4743 bool fTaskGate = false;
4744 uint8_t f32BitGate = true;
4745 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4746 switch (Idte.Gate.u4Type)
4747 {
4748 case X86_SEL_TYPE_SYS_UNDEFINED:
4749 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4750 case X86_SEL_TYPE_SYS_LDT:
4751 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4752 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4753 case X86_SEL_TYPE_SYS_UNDEFINED2:
4754 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4755 case X86_SEL_TYPE_SYS_UNDEFINED3:
4756 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4757 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4758 case X86_SEL_TYPE_SYS_UNDEFINED4:
4759 {
4760 /** @todo check what actually happens when the type is wrong...
4761 * esp. call gates. */
4762 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4763 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4764 }
4765
4766 case X86_SEL_TYPE_SYS_286_INT_GATE:
4767 f32BitGate = false;
4768 /* fall thru */
4769 case X86_SEL_TYPE_SYS_386_INT_GATE:
4770 fEflToClear |= X86_EFL_IF;
4771 break;
4772
4773 case X86_SEL_TYPE_SYS_TASK_GATE:
4774 fTaskGate = true;
4775#ifndef IEM_IMPLEMENTS_TASKSWITCH
4776 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4777#endif
4778 break;
4779
4780 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4781 f32BitGate = false;
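/* fall thru */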
4782 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4783 break;
4784
4785 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4786 }
4787
4788 /* Check DPL against CPL if applicable. */
4789 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4790 {
4791 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4792 {
4793 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4794 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4795 }
4796 }
4797
4798 /* Is it there? */
4799 if (!Idte.Gate.u1Present)
4800 {
4801 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4802 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4803 }
4804
4805 /* Is it a task-gate? */
4806 if (fTaskGate)
4807 {
4808 /*
4809 * Construct the error code masks based on what caused this task switch.
4810 * See Intel Instruction reference for INT.
4811 */
4812 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4813 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4814 RTSEL SelTSS = Idte.Gate.u16Sel;
4815
4816 /*
4817 * Fetch the TSS descriptor in the GDT.
4818 */
4819 IEMSELDESC DescTSS;
4820 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4821 if (rcStrict != VINF_SUCCESS)
4822 {
4823 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4824 VBOXSTRICTRC_VAL(rcStrict)));
4825 return rcStrict;
4826 }
4827
4828 /* The TSS descriptor must be a system segment and be available (not busy). */
4829 if ( DescTSS.Legacy.Gen.u1DescType
4830 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4831 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4832 {
4833 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4834 u8Vector, SelTSS, DescTSS.Legacy.au64));
4835 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4836 }
4837
4838 /* The TSS must be present. */
4839 if (!DescTSS.Legacy.Gen.u1Present)
4840 {
4841 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4842 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4843 }
4844
4845 /* Do the actual task switch. */
4846 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4847 }
4848
4849 /* A null CS is bad. */
4850 RTSEL NewCS = Idte.Gate.u16Sel;
4851 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4852 {
4853 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4854 return iemRaiseGeneralProtectionFault0(pVCpu);
4855 }
4856
4857 /* Fetch the descriptor for the new CS. */
4858 IEMSELDESC DescCS;
4859 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4860 if (rcStrict != VINF_SUCCESS)
4861 {
4862 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4863 return rcStrict;
4864 }
4865
4866 /* Must be a code segment. */
4867 if (!DescCS.Legacy.Gen.u1DescType)
4868 {
4869 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4870 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4871 }
4872 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4873 {
4874 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4875 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4876 }
4877
4878 /* Don't allow lowering the privilege level. */
4879 /** @todo Does the lowering of privileges apply to software interrupts
4880 * only? This has bearings on the more-privileged or
4881 * same-privilege stack behavior further down. A testcase would
4882 * be nice. */
4883 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4884 {
4885 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4886 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4887 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4888 }
4889
4890 /* Make sure the selector is present. */
4891 if (!DescCS.Legacy.Gen.u1Present)
4892 {
4893 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4894 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4895 }
4896
4897 /* Check the new EIP against the new CS limit. */
4898 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4899 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4900 ? Idte.Gate.u16OffsetLow
4901 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4902 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4903 if (uNewEip > cbLimitCS)
4904 {
4905 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4906 u8Vector, uNewEip, cbLimitCS, NewCS));
4907 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4908 }
4909 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4910
4911 /* Calc the flag image to push. */
4912 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4913 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4914 fEfl &= ~X86_EFL_RF;
4915 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4916 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4917
4918 /* From V8086 mode only go to CPL 0. */
4919 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4920 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4921 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4922 {
4923 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4924 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4925 }
4926
4927 /*
4928 * If the privilege level changes, we need to get a new stack from the TSS.
4929 * This in turns means validating the new SS and ESP...
4930 */
4931 if (uNewCpl != pVCpu->iem.s.uCpl)
4932 {
4933 RTSEL NewSS;
4934 uint32_t uNewEsp;
4935 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4936 if (rcStrict != VINF_SUCCESS)
4937 return rcStrict;
4938
4939 IEMSELDESC DescSS;
4940 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4941 if (rcStrict != VINF_SUCCESS)
4942 return rcStrict;
4943 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4944 if (!DescSS.Legacy.Gen.u1DefBig)
4945 {
4946 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4947 uNewEsp = (uint16_t)uNewEsp;
4948 }
4949
4950 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4951
4952 /* Check that there is sufficient space for the stack frame. */
4953 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
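/* Frame: [error code,] EIP, CS, EFLAGS, old ESP, old SS; from V8086 mode also ES, DS, FS and GS. Entries are 2 or 4 bytes wide depending on the gate size. */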
4954 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4955 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4956 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4957
4958 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4959 {
4960 if ( uNewEsp - 1 > cbLimitSS
4961 || uNewEsp < cbStackFrame)
4962 {
4963 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4964 u8Vector, NewSS, uNewEsp, cbStackFrame));
4965 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4966 }
4967 }
4968 else
4969 {
4970 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4971 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4972 {
4973 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4974 u8Vector, NewSS, uNewEsp, cbStackFrame));
4975 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4976 }
4977 }
4978
4979 /*
4980 * Start making changes.
4981 */
4982
4983 /* Set the new CPL so that stack accesses use it. */
4984 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4985 pVCpu->iem.s.uCpl = uNewCpl;
4986
4987 /* Create the stack frame. */
4988 RTPTRUNION uStackFrame;
4989 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4990 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4991 if (rcStrict != VINF_SUCCESS)
4992 return rcStrict;
4993 void * const pvStackFrame = uStackFrame.pv;
4994 if (f32BitGate)
4995 {
4996 if (fFlags & IEM_XCPT_FLAGS_ERR)
4997 *uStackFrame.pu32++ = uErr;
4998 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4999 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5000 uStackFrame.pu32[2] = fEfl;
5001 uStackFrame.pu32[3] = pCtx->esp;
5002 uStackFrame.pu32[4] = pCtx->ss.Sel;
5003 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
5004 if (fEfl & X86_EFL_VM)
5005 {
5006 uStackFrame.pu32[1] = pCtx->cs.Sel;
5007 uStackFrame.pu32[5] = pCtx->es.Sel;
5008 uStackFrame.pu32[6] = pCtx->ds.Sel;
5009 uStackFrame.pu32[7] = pCtx->fs.Sel;
5010 uStackFrame.pu32[8] = pCtx->gs.Sel;
5011 }
5012 }
5013 else
5014 {
5015 if (fFlags & IEM_XCPT_FLAGS_ERR)
5016 *uStackFrame.pu16++ = uErr;
5017 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
5018 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5019 uStackFrame.pu16[2] = fEfl;
5020 uStackFrame.pu16[3] = pCtx->sp;
5021 uStackFrame.pu16[4] = pCtx->ss.Sel;
5022 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
5023 if (fEfl & X86_EFL_VM)
5024 {
5025 uStackFrame.pu16[1] = pCtx->cs.Sel;
5026 uStackFrame.pu16[5] = pCtx->es.Sel;
5027 uStackFrame.pu16[6] = pCtx->ds.Sel;
5028 uStackFrame.pu16[7] = pCtx->fs.Sel;
5029 uStackFrame.pu16[8] = pCtx->gs.Sel;
5030 }
5031 }
5032 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5033 if (rcStrict != VINF_SUCCESS)
5034 return rcStrict;
5035
5036 /* Mark the selectors 'accessed' (hope this is the correct time). */
5037 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5038 * after pushing the stack frame? (Write protect the gdt + stack to
5039 * find out.) */
5040 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5041 {
5042 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5043 if (rcStrict != VINF_SUCCESS)
5044 return rcStrict;
5045 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5046 }
5047
5048 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5049 {
5050 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5051 if (rcStrict != VINF_SUCCESS)
5052 return rcStrict;
5053 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5054 }
5055
5056 /*
5057 * Start committing the register changes (joins with the DPL=CPL branch).
5058 */
5059 pCtx->ss.Sel = NewSS;
5060 pCtx->ss.ValidSel = NewSS;
5061 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5062 pCtx->ss.u32Limit = cbLimitSS;
5063 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5064 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5065 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5066 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5067 * SP is loaded).
5068 * Need to check the other combinations too:
5069 * - 16-bit TSS, 32-bit handler
5070 * - 32-bit TSS, 16-bit handler */
5071 if (!pCtx->ss.Attr.n.u1DefBig)
5072 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
5073 else
5074 pCtx->rsp = uNewEsp - cbStackFrame;
5075
5076 if (fEfl & X86_EFL_VM)
5077 {
5078 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
5079 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5080 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5081 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5082 }
5083 }
5084 /*
5085 * Same privilege, no stack change and smaller stack frame.
5086 */
5087 else
5088 {
5089 uint64_t uNewRsp;
5090 RTPTRUNION uStackFrame;
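/* Frame: [error code,] EIP, CS, EFLAGS; entries are 2 or 4 bytes wide depending on the gate size. */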
5091 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5092 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5093 if (rcStrict != VINF_SUCCESS)
5094 return rcStrict;
5095 void * const pvStackFrame = uStackFrame.pv;
5096
5097 if (f32BitGate)
5098 {
5099 if (fFlags & IEM_XCPT_FLAGS_ERR)
5100 *uStackFrame.pu32++ = uErr;
5101 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5102 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5103 uStackFrame.pu32[2] = fEfl;
5104 }
5105 else
5106 {
5107 if (fFlags & IEM_XCPT_FLAGS_ERR)
5108 *uStackFrame.pu16++ = uErr;
5109 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5110 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5111 uStackFrame.pu16[2] = fEfl;
5112 }
5113 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5114 if (rcStrict != VINF_SUCCESS)
5115 return rcStrict;
5116
5117 /* Mark the CS selector as 'accessed'. */
5118 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5119 {
5120 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5121 if (rcStrict != VINF_SUCCESS)
5122 return rcStrict;
5123 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5124 }
5125
5126 /*
5127 * Start committing the register changes (joins with the other branch).
5128 */
5129 pCtx->rsp = uNewRsp;
5130 }
5131
5132 /* ... register committing continues. */
5133 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5134 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5135 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5136 pCtx->cs.u32Limit = cbLimitCS;
5137 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5138 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5139
5140 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5141 fEfl &= ~fEflToClear;
5142 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5143
5144 if (fFlags & IEM_XCPT_FLAGS_CR2)
5145 pCtx->cr2 = uCr2;
5146
5147 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5148 iemRaiseXcptAdjustState(pCtx, u8Vector);
5149
5150 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5151}
5152
5153
5154/**
5155 * Implements exceptions and interrupts for long mode.
5156 *
5157 * @returns VBox strict status code.
5158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5159 * @param pCtx The CPU context.
5160 * @param cbInstr The number of bytes to offset rIP by in the return
5161 * address.
5162 * @param u8Vector The interrupt / exception vector number.
5163 * @param fFlags The flags.
5164 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5165 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5166 */
5167IEM_STATIC VBOXSTRICTRC
5168iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5169 PCPUMCTX pCtx,
5170 uint8_t cbInstr,
5171 uint8_t u8Vector,
5172 uint32_t fFlags,
5173 uint16_t uErr,
5174 uint64_t uCr2)
5175{
5176 /*
5177 * Read the IDT entry.
5178 */
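/* Long-mode IDT entries are 16 bytes each. */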
5179 uint16_t offIdt = (uint16_t)u8Vector << 4;
5180 if (pCtx->idtr.cbIdt < offIdt + 7)
5181 {
5182 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5183 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5184 }
5185 X86DESC64 Idte;
5186 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5187 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5188 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5189 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5190 return rcStrict;
5191 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5192 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5193 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5194
5195 /*
5196 * Check the descriptor type, DPL and such.
5197 * ASSUMES this is done in the same order as described for call-gate calls.
5198 */
5199 if (Idte.Gate.u1DescType)
5200 {
5201 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5202 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5203 }
5204 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5205 switch (Idte.Gate.u4Type)
5206 {
5207 case AMD64_SEL_TYPE_SYS_INT_GATE:
5208 fEflToClear |= X86_EFL_IF;
5209 break;
5210 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5211 break;
5212
5213 default:
5214 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5215 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5216 }
5217
5218 /* Check DPL against CPL if applicable. */
5219 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5220 {
5221 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5222 {
5223 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5224 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5225 }
5226 }
5227
5228 /* Is it there? */
5229 if (!Idte.Gate.u1Present)
5230 {
5231 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5232 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5233 }
5234
5235 /* A null CS is bad. */
5236 RTSEL NewCS = Idte.Gate.u16Sel;
5237 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5238 {
5239 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5240 return iemRaiseGeneralProtectionFault0(pVCpu);
5241 }
5242
5243 /* Fetch the descriptor for the new CS. */
5244 IEMSELDESC DescCS;
5245 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5246 if (rcStrict != VINF_SUCCESS)
5247 {
5248 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5249 return rcStrict;
5250 }
5251
5252 /* Must be a 64-bit code segment. */
5253 if (!DescCS.Long.Gen.u1DescType)
5254 {
5255 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5256 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5257 }
5258 if ( !DescCS.Long.Gen.u1Long
5259 || DescCS.Long.Gen.u1DefBig
5260 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5261 {
5262 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5263 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5264 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5265 }
5266
5267 /* Don't allow lowering the privilege level. For non-conforming CS
5268 selectors, the CS.DPL sets the privilege level the trap/interrupt
5269 handler runs at. For conforming CS selectors, the CPL remains
5270 unchanged, but the CS.DPL must be <= CPL. */
5271 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5272 * when CPU in Ring-0. Result \#GP? */
5273 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5274 {
5275 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5276 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5277 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5278 }
5279
5280
5281 /* Make sure the selector is present. */
5282 if (!DescCS.Legacy.Gen.u1Present)
5283 {
5284 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5285 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5286 }
5287
5288 /* Check that the new RIP is canonical. */
5289 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5290 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5291 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5292 if (!IEM_IS_CANONICAL(uNewRip))
5293 {
5294 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5295 return iemRaiseGeneralProtectionFault0(pVCpu);
5296 }
5297
5298 /*
5299 * If the privilege level changes or if the IST isn't zero, we need to get
5300 * a new stack from the TSS.
5301 */
5302 uint64_t uNewRsp;
5303 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5304 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5305 if ( uNewCpl != pVCpu->iem.s.uCpl
5306 || Idte.Gate.u3IST != 0)
5307 {
5308 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5309 if (rcStrict != VINF_SUCCESS)
5310 return rcStrict;
5311 }
5312 else
5313 uNewRsp = pCtx->rsp;
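/* The CPU aligns RSP on a 16-byte boundary before pushing the frame in 64-bit mode. */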
5314 uNewRsp &= ~(uint64_t)0xf;
5315
5316 /*
5317 * Calc the flag image to push.
5318 */
5319 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5320 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5321 fEfl &= ~X86_EFL_RF;
5322 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5323 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5324
5325 /*
5326 * Start making changes.
5327 */
5328 /* Set the new CPL so that stack accesses use it. */
5329 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5330 pVCpu->iem.s.uCpl = uNewCpl;
5331
5332 /* Create the stack frame. */
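/* Frame: [error code,] RIP, CS, RFLAGS, old RSP, old SS; all entries are 8 bytes wide. */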
5333 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5334 RTPTRUNION uStackFrame;
5335 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5336 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5337 if (rcStrict != VINF_SUCCESS)
5338 return rcStrict;
5339 void * const pvStackFrame = uStackFrame.pv;
5340
5341 if (fFlags & IEM_XCPT_FLAGS_ERR)
5342 *uStackFrame.pu64++ = uErr;
5343 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5344 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5345 uStackFrame.pu64[2] = fEfl;
5346 uStackFrame.pu64[3] = pCtx->rsp;
5347 uStackFrame.pu64[4] = pCtx->ss.Sel;
5348 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5349 if (rcStrict != VINF_SUCCESS)
5350 return rcStrict;
5351
5352 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5353 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5354 * after pushing the stack frame? (Write protect the gdt + stack to
5355 * find out.) */
5356 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5357 {
5358 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5359 if (rcStrict != VINF_SUCCESS)
5360 return rcStrict;
5361 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5362 }
5363
5364 /*
5365 * Start committing the register changes.
5366 */
5367 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5368 * hidden registers when interrupting 32-bit or 16-bit code! */
5369 if (uNewCpl != uOldCpl)
5370 {
5371 pCtx->ss.Sel = 0 | uNewCpl;
5372 pCtx->ss.ValidSel = 0 | uNewCpl;
5373 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5374 pCtx->ss.u32Limit = UINT32_MAX;
5375 pCtx->ss.u64Base = 0;
5376 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5377 }
5378 pCtx->rsp = uNewRsp - cbStackFrame;
5379 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5380 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5381 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5382 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5383 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5384 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5385 pCtx->rip = uNewRip;
5386
5387 fEfl &= ~fEflToClear;
5388 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5389
5390 if (fFlags & IEM_XCPT_FLAGS_CR2)
5391 pCtx->cr2 = uCr2;
5392
5393 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5394 iemRaiseXcptAdjustState(pCtx, u8Vector);
5395
5396 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5397}
5398
5399
5400/**
5401 * Implements exceptions and interrupts.
5402 *
5403 * All exceptions and interrupts go thru this function!
5404 *
5405 * @returns VBox strict status code.
5406 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5407 * @param cbInstr The number of bytes to offset rIP by in the return
5408 * address.
5409 * @param u8Vector The interrupt / exception vector number.
5410 * @param fFlags The flags.
5411 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5412 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5413 */
5414DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5415iemRaiseXcptOrInt(PVMCPU pVCpu,
5416 uint8_t cbInstr,
5417 uint8_t u8Vector,
5418 uint32_t fFlags,
5419 uint16_t uErr,
5420 uint64_t uCr2)
5421{
5422 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5423#ifdef IN_RING0
5424 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5425 AssertRCReturn(rc, rc);
5426#endif
5427
5428#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5429 /*
5430 * Flush prefetch buffer
5431 */
5432 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5433#endif
5434
5435 /*
5436 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5437 */
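/* A software INT executed in V8086 mode with IOPL != 3 is converted into #GP(0) here rather than dispatched via the IDT. */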
5438 if ( pCtx->eflags.Bits.u1VM
5439 && pCtx->eflags.Bits.u2IOPL != 3
5440 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5441 && (pCtx->cr0 & X86_CR0_PE) )
5442 {
5443 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5444 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5445 u8Vector = X86_XCPT_GP;
5446 uErr = 0;
5447 }
5448#ifdef DBGFTRACE_ENABLED
5449 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5450 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5451 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5452#endif
5453
5454#ifdef VBOX_WITH_NESTED_HWVIRT
5455 if (IEM_IS_SVM_ENABLED(pVCpu))
5456 {
5457 /*
5458 * If the event is being injected as part of VMRUN, it isn't subject to event
5459 * intercepts in the nested-guest. However, secondary exceptions that occur
5460 * during injection of any event -are- subject to exception intercepts.
5461 * See AMD spec. 15.20 "Event Injection".
5462 */
5463 if (!pCtx->hwvirt.svm.fInterceptEvents)
5464 pCtx->hwvirt.svm.fInterceptEvents = 1;
5465 else
5466 {
5467 /*
5468 * Check and handle if the event being raised is intercepted.
5469 */
5470 VBOXSTRICTRC rcStrict0 = iemHandleSvmNstGstEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5471 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5472 return rcStrict0;
5473 }
5474 }
5475#endif /* VBOX_WITH_NESTED_HWVIRT */
5476
5477 /*
5478 * Do recursion accounting.
5479 */
5480 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5481 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5482 if (pVCpu->iem.s.cXcptRecursions == 0)
5483 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5484 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5485 else
5486 {
5487 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5488 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5489 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5490
5491 if (pVCpu->iem.s.cXcptRecursions >= 3)
5492 {
5493#ifdef DEBUG_bird
5494 AssertFailed();
5495#endif
5496 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5497 }
5498
5499 /*
5500 * Evaluate the sequence of recurring events.
5501 */
5502 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5503 NULL /* pXcptRaiseInfo */);
5504 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5505 { /* likely */ }
5506 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5507 {
5508 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5509 u8Vector = X86_XCPT_DF;
5510 uErr = 0;
5511 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5512 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5513 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + X86_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5514 }
5515 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5516 {
5517 Log2(("iemRaiseXcptOrInt: raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5518 return iemInitiateCpuShutdown(pVCpu);
5519 }
5520 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5521 {
5522 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5523 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5524 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5525 return VERR_EM_GUEST_CPU_HANG;
5526 }
5527 else
5528 {
5529 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5530 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5531 return VERR_IEM_IPE_9;
5532 }
5533
5534 /*
5535 * The 'EXT' bit is set when an exception occurs during delivery of an external
5536 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5537 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by
5538 * software interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5539 *
5540 * [1] - Intel spec. 6.13 "Error Code"
5541 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5542 * [3] - Intel Instruction reference for INT n.
5543 */
5544 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5545 && (fFlags & IEM_XCPT_FLAGS_ERR)
5546 && u8Vector != X86_XCPT_PF
5547 && u8Vector != X86_XCPT_DF)
5548 {
5549 uErr |= X86_TRAP_ERR_EXTERNAL;
5550 }
5551 }
5552
5553 pVCpu->iem.s.cXcptRecursions++;
5554 pVCpu->iem.s.uCurXcpt = u8Vector;
5555 pVCpu->iem.s.fCurXcpt = fFlags;
5556 pVCpu->iem.s.uCurXcptErr = uErr;
5557 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5558
5559 /*
5560 * Extensive logging.
5561 */
5562#if defined(LOG_ENABLED) && defined(IN_RING3)
5563 if (LogIs3Enabled())
5564 {
5565 PVM pVM = pVCpu->CTX_SUFF(pVM);
5566 char szRegs[4096];
5567 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5568 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5569 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5570 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5571 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5572 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5573 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5574 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5575 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5576 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5577 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5578 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5579 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5580 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5581 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5582 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5583 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5584 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5585 " efer=%016VR{efer}\n"
5586 " pat=%016VR{pat}\n"
5587 " sf_mask=%016VR{sf_mask}\n"
5588 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5589 " lstar=%016VR{lstar}\n"
5590 " star=%016VR{star} cstar=%016VR{cstar}\n"
5591 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5592 );
5593
5594 char szInstr[256];
5595 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5596 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5597 szInstr, sizeof(szInstr), NULL);
5598 Log3(("%s%s\n", szRegs, szInstr));
5599 }
5600#endif /* LOG_ENABLED */
5601
5602 /*
5603 * Call the mode specific worker function.
5604 */
5605 VBOXSTRICTRC rcStrict;
5606 if (!(pCtx->cr0 & X86_CR0_PE))
5607 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5608 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5609 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5610 else
5611 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5612
5613 /* Flush the prefetch buffer. */
5614#ifdef IEM_WITH_CODE_TLB
5615 pVCpu->iem.s.pbInstrBuf = NULL;
5616#else
5617 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5618#endif
5619
5620 /*
5621 * Unwind.
5622 */
5623 pVCpu->iem.s.cXcptRecursions--;
5624 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5625 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5626 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5627 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5628 return rcStrict;
5629}
5630
5631#ifdef IEM_WITH_SETJMP
5632/**
5633 * See iemRaiseXcptOrInt. Will not return.
5634 */
5635IEM_STATIC DECL_NO_RETURN(void)
5636iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5637 uint8_t cbInstr,
5638 uint8_t u8Vector,
5639 uint32_t fFlags,
5640 uint16_t uErr,
5641 uint64_t uCr2)
5642{
5643 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5644 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5645}
5646#endif
5647
5648
5649/** \#DE - 00. */
5650DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5651{
5652 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5653}
5654
5655
5656/** \#DB - 01.
5657 * @note This automatically clears DR7.GD. */
5658DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5659{
5660 /** @todo set/clear RF. */
5661 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5662 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5663}
5664
5665
5666/** \#BR - 05. */
5667DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5668{
5669 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5670}
5671
5672
5673/** \#UD - 06. */
5674DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5675{
5676 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5677}
5678
5679
5680/** \#NM - 07. */
5681DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5682{
5683 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5684}
5685
5686
5687/** \#TS(err) - 0a. */
5688DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5689{
5690 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5691}
5692
5693
5694/** \#TS(tr) - 0a. */
5695DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5696{
5697 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5698 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5699}
5700
5701
5702/** \#TS(0) - 0a. */
5703DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5704{
5705 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5706 0, 0);
5707}
5708
5709
5710/** \#TS(err) - 0a. */
5711DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5712{
5713 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5714 uSel & X86_SEL_MASK_OFF_RPL, 0);
5715}
5716
5717
5718/** \#NP(err) - 0b. */
5719DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5720{
5721 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5722}
5723
5724
5725/** \#NP(sel) - 0b. */
5726DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5727{
5728 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5729 uSel & ~X86_SEL_RPL, 0);
5730}
5731
5732
5733/** \#SS(seg) - 0c. */
5734DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5735{
5736 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5737 uSel & ~X86_SEL_RPL, 0);
5738}
5739
5740
5741/** \#SS(err) - 0c. */
5742DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5743{
5744 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5745}
5746
5747
5748/** \#GP(n) - 0d. */
5749DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5750{
5751 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5752}
5753
5754
5755/** \#GP(0) - 0d. */
5756DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5757{
5758 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5759}
5760
5761#ifdef IEM_WITH_SETJMP
5762/** \#GP(0) - 0d. */
5763DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5764{
5765 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5766}
5767#endif
5768
5769
5770/** \#GP(sel) - 0d. */
5771DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5772{
5773 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5774 Sel & ~X86_SEL_RPL, 0);
5775}
5776
5777
5778/** \#GP(0) - 0d. */
5779DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5780{
5781 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5782}
5783
5784
5785/** \#GP(sel) - 0d. */
5786DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5787{
5788 NOREF(iSegReg); NOREF(fAccess);
5789 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5790 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5791}
5792
5793#ifdef IEM_WITH_SETJMP
5794/** \#GP(sel) - 0d, longjmp. */
5795DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5796{
5797 NOREF(iSegReg); NOREF(fAccess);
5798 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5799 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5800}
5801#endif
5802
5803/** \#GP(sel) - 0d. */
5804DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5805{
5806 NOREF(Sel);
5807 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5808}
5809
5810#ifdef IEM_WITH_SETJMP
5811/** \#GP(sel) - 0d, longjmp. */
5812DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5813{
5814 NOREF(Sel);
5815 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5816}
5817#endif
5818
5819
5820/** \#GP(sel) - 0d. */
5821DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5822{
5823 NOREF(iSegReg); NOREF(fAccess);
5824 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5825}
5826
5827#ifdef IEM_WITH_SETJMP
5828/** \#GP(sel) - 0d, longjmp. */
5829DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5830 uint32_t fAccess)
5831{
5832 NOREF(iSegReg); NOREF(fAccess);
5833 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5834}
5835#endif
5836
5837
5838/** \#PF(n) - 0e. */
5839DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5840{
5841 uint16_t uErr;
5842 switch (rc)
5843 {
5844 case VERR_PAGE_NOT_PRESENT:
5845 case VERR_PAGE_TABLE_NOT_PRESENT:
5846 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5847 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5848 uErr = 0;
5849 break;
5850
5851 default:
5852 AssertMsgFailed(("%Rrc\n", rc));
5853 /* fall thru */
5854 case VERR_ACCESS_DENIED:
5855 uErr = X86_TRAP_PF_P;
5856 break;
5857
5858 /** @todo reserved */
5859 }
5860
5861 if (pVCpu->iem.s.uCpl == 3)
5862 uErr |= X86_TRAP_PF_US;
5863
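/* The instruction-fetch (I/D) bit is only reported when no-execute paging is in effect (CR4.PAE and EFER.NXE). */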
5864 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5865 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5866 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5867 uErr |= X86_TRAP_PF_ID;
5868
5869#if 0 /* This is so much nonsense, really. Why was it done like that? */
5870 /* Note! RW access callers reporting a WRITE protection fault, will clear
5871 the READ flag before calling. So, read-modify-write accesses (RW)
5872 can safely be reported as READ faults. */
5873 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5874 uErr |= X86_TRAP_PF_RW;
5875#else
5876 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5877 {
5878 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5879 uErr |= X86_TRAP_PF_RW;
5880 }
5881#endif
5882
5883 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5884 uErr, GCPtrWhere);
5885}
5886
5887#ifdef IEM_WITH_SETJMP
5888/** \#PF(n) - 0e, longjmp. */
5889IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5890{
5891 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5892}
5893#endif
5894
5895
5896/** \#MF(0) - 10. */
5897DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5898{
5899 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5900}
5901
5902
5903/** \#AC(0) - 11. */
5904DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5905{
5906 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5907}
5908
5909
5910/**
5911 * Macro for calling iemCImplRaiseDivideError().
5912 *
5913 * This enables us to add/remove arguments and force different levels of
5914 * inlining as we wish.
5915 *
5916 * @return Strict VBox status code.
5917 */
5918#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5919IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5920{
5921 NOREF(cbInstr);
5922 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5923}
5924
5925
5926/**
5927 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5928 *
5929 * This enables us to add/remove arguments and force different levels of
5930 * inlining as we wish.
5931 *
5932 * @return Strict VBox status code.
5933 */
5934#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5935IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5936{
5937 NOREF(cbInstr);
5938 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5939}
5940
5941
5942/**
5943 * Macro for calling iemCImplRaiseInvalidOpcode().
5944 *
5945 * This enables us to add/remove arguments and force different levels of
5946 * inlining as we wish.
5947 *
5948 * @return Strict VBox status code.
5949 */
5950#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5951IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5952{
5953 NOREF(cbInstr);
5954 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5955}
5956
5957
5958/** @} */
5959
5960
5961/*
5962 *
5963 * Helper routines.
5964 * Helper routines.
5965 * Helper routines.
5966 *
5967 */
5968
5969/**
5970 * Recalculates the effective operand size.
5971 *
5972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5973 */
5974IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5975{
5976 switch (pVCpu->iem.s.enmCpuMode)
5977 {
5978 case IEMMODE_16BIT:
5979 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5980 break;
5981 case IEMMODE_32BIT:
5982 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5983 break;
5984 case IEMMODE_64BIT:
5985 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5986 {
5987 case 0:
5988 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5989 break;
5990 case IEM_OP_PRF_SIZE_OP:
5991 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5992 break;
5993 case IEM_OP_PRF_SIZE_REX_W:
5994 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5995 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5996 break;
5997 }
5998 break;
5999 default:
6000 AssertFailed();
6001 }
6002}
6003
6004
6005/**
6006 * Sets the default operand size to 64-bit and recalculates the effective
6007 * operand size.
6008 *
6009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6010 */
6011IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6012{
6013 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6014 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
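/* REX.W forces 64-bit operand size and takes precedence over the 0x66 operand-size prefix. */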
6015 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6016 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6017 else
6018 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6019}
6020
6021
6022/*
6023 *
6024 * Common opcode decoders.
6025 * Common opcode decoders.
6026 * Common opcode decoders.
6027 *
6028 */
6029//#include <iprt/mem.h>
6030
6031/**
6032 * Used to add extra details about a stub case.
6033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6034 */
6035IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6036{
6037#if defined(LOG_ENABLED) && defined(IN_RING3)
6038 PVM pVM = pVCpu->CTX_SUFF(pVM);
6039 char szRegs[4096];
6040 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6041 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6042 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6043 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6044 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6045 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6046 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6047 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6048 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6049 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6050 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6051 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6052 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6053 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6054 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6055 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6056 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6057 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6058 " efer=%016VR{efer}\n"
6059 " pat=%016VR{pat}\n"
6060 " sf_mask=%016VR{sf_mask}\n"
6061 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6062 " lstar=%016VR{lstar}\n"
6063 " star=%016VR{star} cstar=%016VR{cstar}\n"
6064 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6065 );
6066
6067 char szInstr[256];
6068 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6069 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6070 szInstr, sizeof(szInstr), NULL);
6071
6072 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6073#else
6074 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
6075#endif
6076}
6077
6078/**
6079 * Complains about a stub.
6080 *
6081 * Providing two versions of this macro, one for daily use and one for use when
6082 * working on IEM.
6083 */
6084#if 0
6085# define IEMOP_BITCH_ABOUT_STUB() \
6086 do { \
6087 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6088 iemOpStubMsg2(pVCpu); \
6089 RTAssertPanic(); \
6090 } while (0)
6091#else
6092# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6093#endif
6094
6095/** Stubs an opcode. */
6096#define FNIEMOP_STUB(a_Name) \
6097 FNIEMOP_DEF(a_Name) \
6098 { \
6099 RT_NOREF_PV(pVCpu); \
6100 IEMOP_BITCH_ABOUT_STUB(); \
6101 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6102 } \
6103 typedef int ignore_semicolon
6104
6105/** Stubs an opcode. */
6106#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6107 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6108 { \
6109 RT_NOREF_PV(pVCpu); \
6110 RT_NOREF_PV(a_Name0); \
6111 IEMOP_BITCH_ABOUT_STUB(); \
6112 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6113 } \
6114 typedef int ignore_semicolon
6115
6116/** Stubs an opcode which currently should raise \#UD. */
6117#define FNIEMOP_UD_STUB(a_Name) \
6118 FNIEMOP_DEF(a_Name) \
6119 { \
6120 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6121 return IEMOP_RAISE_INVALID_OPCODE(); \
6122 } \
6123 typedef int ignore_semicolon
6124
6125/** Stubs an opcode which currently should raise \#UD. */
6126#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6127 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6128 { \
6129 RT_NOREF_PV(pVCpu); \
6130 RT_NOREF_PV(a_Name0); \
6131 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6132 return IEMOP_RAISE_INVALID_OPCODE(); \
6133 } \
6134 typedef int ignore_semicolon
6135
6136
6137
6138/** @name Register Access.
6139 * @{
6140 */
6141
6142/**
6143 * Gets a reference (pointer) to the specified hidden segment register.
6144 *
6145 * @returns Hidden register reference.
6146 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6147 * @param iSegReg The segment register.
6148 */
6149IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6150{
6151 Assert(iSegReg < X86_SREG_COUNT);
6152 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6153 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6154
6155#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6156 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6157 { /* likely */ }
6158 else
6159 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6160#else
6161 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6162#endif
6163 return pSReg;
6164}
6165
6166
6167/**
6168 * Ensures that the given hidden segment register is up to date.
6169 *
6170 * @returns Hidden register reference.
6171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6172 * @param pSReg The segment register.
6173 */
6174IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6175{
6176#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6177 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6178 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6179#else
6180 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6181 NOREF(pVCpu);
6182#endif
6183 return pSReg;
6184}
6185
6186
6187/**
6188 * Gets a reference (pointer) to the specified segment register (the selector
6189 * value).
6190 *
6191 * @returns Pointer to the selector variable.
6192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6193 * @param iSegReg The segment register.
6194 */
6195DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6196{
6197 Assert(iSegReg < X86_SREG_COUNT);
6198 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6199 return &pCtx->aSRegs[iSegReg].Sel;
6200}
6201
6202
6203/**
6204 * Fetches the selector value of a segment register.
6205 *
6206 * @returns The selector value.
6207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6208 * @param iSegReg The segment register.
6209 */
6210DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6211{
6212 Assert(iSegReg < X86_SREG_COUNT);
6213 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6214}
6215
6216
6217/**
6218 * Gets a reference (pointer) to the specified general purpose register.
6219 *
6220 * @returns Register reference.
6221 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6222 * @param iReg The general purpose register.
6223 */
6224DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6225{
6226 Assert(iReg < 16);
6227 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6228 return &pCtx->aGRegs[iReg];
6229}
6230
6231
6232/**
6233 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6234 *
6235 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6236 *
6237 * @returns Register reference.
6238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6239 * @param iReg The register.
6240 */
6241DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6242{
6243 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6244 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6245 {
6246 Assert(iReg < 16);
6247 return &pCtx->aGRegs[iReg].u8;
6248 }
6249 /* high 8-bit register. */
6250 Assert(iReg < 8);
6251 return &pCtx->aGRegs[iReg & 3].bHi;
6252}
6253
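/*
 * Quick illustration of the mapping above (the register indices are just
 * example values): without any REX prefix an 8-bit operand index of 4 decodes
 * as AH, i.e. bits 8 thru 15 of RAX (aGRegs[4 & 3].bHi), whereas with a REX
 * prefix present the same index selects SPL, the low byte of RSP (aGRegs[4].u8).
 */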
6254
6255/**
6256 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6257 *
6258 * @returns Register reference.
6259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6260 * @param iReg The register.
6261 */
6262DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6263{
6264 Assert(iReg < 16);
6265 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6266 return &pCtx->aGRegs[iReg].u16;
6267}
6268
6269
6270/**
6271 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6272 *
6273 * @returns Register reference.
6274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6275 * @param iReg The register.
6276 */
6277DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6278{
6279 Assert(iReg < 16);
6280 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6281 return &pCtx->aGRegs[iReg].u32;
6282}
6283
6284
6285/**
6286 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6287 *
6288 * @returns Register reference.
6289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6290 * @param iReg The register.
6291 */
6292DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6293{
6294 Assert(iReg < 16);
6295 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6296 return &pCtx->aGRegs[iReg].u64;
6297}
6298
6299
6300/**
6301 * Fetches the value of an 8-bit general purpose register.
6302 *
6303 * @returns The register value.
6304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6305 * @param iReg The register.
6306 */
6307DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6308{
6309 return *iemGRegRefU8(pVCpu, iReg);
6310}
6311
6312
6313/**
6314 * Fetches the value of a 16-bit general purpose register.
6315 *
6316 * @returns The register value.
6317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6318 * @param iReg The register.
6319 */
6320DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6321{
6322 Assert(iReg < 16);
6323 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6324}
6325
6326
6327/**
6328 * Fetches the value of a 32-bit general purpose register.
6329 *
6330 * @returns The register value.
6331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6332 * @param iReg The register.
6333 */
6334DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6335{
6336 Assert(iReg < 16);
6337 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6338}
6339
6340
6341/**
6342 * Fetches the value of a 64-bit general purpose register.
6343 *
6344 * @returns The register value.
6345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6346 * @param iReg The register.
6347 */
6348DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6349{
6350 Assert(iReg < 16);
6351 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6352}
6353
6354
6355/**
6356 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6357 *
6358 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6359 * segment limit.
6360 *
6361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6362 * @param offNextInstr The offset of the next instruction.
6363 */
6364IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6365{
6366 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6367 switch (pVCpu->iem.s.enmEffOpSize)
6368 {
6369 case IEMMODE_16BIT:
6370 {
6371 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6372 if ( uNewIp > pCtx->cs.u32Limit
6373 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6374 return iemRaiseGeneralProtectionFault0(pVCpu);
6375 pCtx->rip = uNewIp;
6376 break;
6377 }
6378
6379 case IEMMODE_32BIT:
6380 {
6381 Assert(pCtx->rip <= UINT32_MAX);
6382 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6383
6384 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6385 if (uNewEip > pCtx->cs.u32Limit)
6386 return iemRaiseGeneralProtectionFault0(pVCpu);
6387 pCtx->rip = uNewEip;
6388 break;
6389 }
6390
6391 case IEMMODE_64BIT:
6392 {
6393 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6394
6395 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6396 if (!IEM_IS_CANONICAL(uNewRip))
6397 return iemRaiseGeneralProtectionFault0(pVCpu);
6398 pCtx->rip = uNewRip;
6399 break;
6400 }
6401
6402 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6403 }
6404
6405 pCtx->eflags.Bits.u1RF = 0;
6406
6407#ifndef IEM_WITH_CODE_TLB
6408 /* Flush the prefetch buffer. */
6409 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6410#endif
6411
6412 return VINF_SUCCESS;
6413}
6414
6415
6416/**
6417 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6418 *
6419 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6420 * segment limit.
6421 *
6422 * @returns Strict VBox status code.
6423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6424 * @param offNextInstr The offset of the next instruction.
6425 */
6426IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6427{
6428 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6429 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6430
6431 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6432 if ( uNewIp > pCtx->cs.u32Limit
6433 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6434 return iemRaiseGeneralProtectionFault0(pVCpu);
6435 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6436 pCtx->rip = uNewIp;
6437 pCtx->eflags.Bits.u1RF = 0;
6438
6439#ifndef IEM_WITH_CODE_TLB
6440 /* Flush the prefetch buffer. */
6441 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6442#endif
6443
6444 return VINF_SUCCESS;
6445}
6446
6447
6448/**
6449 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6450 *
6451 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6452 * segment limit.
6453 *
6454 * @returns Strict VBox status code.
6455 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6456 * @param offNextInstr The offset of the next instruction.
6457 */
6458IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6459{
6460 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6461 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6462
6463 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6464 {
6465 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6466
6467 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6468 if (uNewEip > pCtx->cs.u32Limit)
6469 return iemRaiseGeneralProtectionFault0(pVCpu);
6470 pCtx->rip = uNewEip;
6471 }
6472 else
6473 {
6474 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6475
6476 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6477 if (!IEM_IS_CANONICAL(uNewRip))
6478 return iemRaiseGeneralProtectionFault0(pVCpu);
6479 pCtx->rip = uNewRip;
6480 }
6481 pCtx->eflags.Bits.u1RF = 0;
6482
6483#ifndef IEM_WITH_CODE_TLB
6484 /* Flush the prefetch buffer. */
6485 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6486#endif
6487
6488 return VINF_SUCCESS;
6489}
6490
6491
6492/**
6493 * Performs a near jump to the specified address.
6494 *
6495 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6496 * segment limit.
6497 *
6498 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6499 * @param uNewRip The new RIP value.
6500 */
6501IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6502{
6503 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6504 switch (pVCpu->iem.s.enmEffOpSize)
6505 {
6506 case IEMMODE_16BIT:
6507 {
6508 Assert(uNewRip <= UINT16_MAX);
6509 if ( uNewRip > pCtx->cs.u32Limit
6510 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6511 return iemRaiseGeneralProtectionFault0(pVCpu);
6512 /** @todo Test 16-bit jump in 64-bit mode. */
6513 pCtx->rip = uNewRip;
6514 break;
6515 }
6516
6517 case IEMMODE_32BIT:
6518 {
6519 Assert(uNewRip <= UINT32_MAX);
6520 Assert(pCtx->rip <= UINT32_MAX);
6521 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6522
6523 if (uNewRip > pCtx->cs.u32Limit)
6524 return iemRaiseGeneralProtectionFault0(pVCpu);
6525 pCtx->rip = uNewRip;
6526 break;
6527 }
6528
6529 case IEMMODE_64BIT:
6530 {
6531 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6532
6533 if (!IEM_IS_CANONICAL(uNewRip))
6534 return iemRaiseGeneralProtectionFault0(pVCpu);
6535 pCtx->rip = uNewRip;
6536 break;
6537 }
6538
6539 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6540 }
6541
6542 pCtx->eflags.Bits.u1RF = 0;
6543
6544#ifndef IEM_WITH_CODE_TLB
6545 /* Flush the prefetch buffer. */
6546 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6547#endif
6548
6549 return VINF_SUCCESS;
6550}
6551
6552
6553/**
6554 * Gets the address of the top of the stack.
6555 *
6556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6557 * @param pCtx The CPU context from which SP/ESP/RSP should be
6558 * read.
6559 */
6560DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6561{
6562 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6563 return pCtx->rsp;
6564 if (pCtx->ss.Attr.n.u1DefBig)
6565 return pCtx->esp;
6566 return pCtx->sp;
6567}
6568
6569
6570/**
6571 * Updates the RIP/EIP/IP to point to the next instruction.
6572 *
6573 * This function leaves the EFLAGS.RF flag alone.
6574 *
6575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6576 * @param cbInstr The number of bytes to add.
6577 */
6578IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6579{
6580 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6581 switch (pVCpu->iem.s.enmCpuMode)
6582 {
6583 case IEMMODE_16BIT:
6584 Assert(pCtx->rip <= UINT16_MAX);
6585 pCtx->eip += cbInstr;
6586 pCtx->eip &= UINT32_C(0xffff);
6587 break;
6588
6589 case IEMMODE_32BIT:
6590 pCtx->eip += cbInstr;
6591 Assert(pCtx->rip <= UINT32_MAX);
6592 break;
6593
6594 case IEMMODE_64BIT:
6595 pCtx->rip += cbInstr;
6596 break;
6597 default: AssertFailed();
6598 }
6599}
6600
6601
6602#if 0
6603/**
6604 * Updates the RIP/EIP/IP to point to the next instruction.
6605 *
6606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6607 */
6608IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6609{
6610 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6611}
6612#endif
6613
6614
6615
6616/**
6617 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6618 *
6619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6620 * @param cbInstr The number of bytes to add.
6621 */
6622IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6623{
6624 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6625
6626 pCtx->eflags.Bits.u1RF = 0;
6627
6628 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6629#if ARCH_BITS >= 64
6630 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6631 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6632 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6633#else
6634 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6635 pCtx->rip += cbInstr;
6636 else
6637 {
6638 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6639 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6640 }
6641#endif
6642}
6643
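/*
 * Illustrative example (made-up values) of the masking above: in 16-bit code
 * the sum is masked down to 16 bits, so with rip = 0xFFFE and cbInstr = 3 the
 * new value is (0xFFFE + 3) & 0xFFFF = 0x0001, while the upper RIP bits stay zero.
 */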
6644
6645/**
6646 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6647 *
6648 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6649 */
6650IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6651{
6652 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6653}
6654
6655
6656/**
6657 * Adds to the stack pointer.
6658 *
6659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6660 * @param pCtx The CPU context in which SP/ESP/RSP should be
6661 * updated.
6662 * @param cbToAdd The number of bytes to add (8-bit!).
6663 */
6664DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6665{
6666 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6667 pCtx->rsp += cbToAdd;
6668 else if (pCtx->ss.Attr.n.u1DefBig)
6669 pCtx->esp += cbToAdd;
6670 else
6671 pCtx->sp += cbToAdd;
6672}
6673
6674
6675/**
6676 * Subtracts from the stack pointer.
6677 *
6678 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6679 * @param pCtx The CPU context in which SP/ESP/RSP should be
6680 * updated.
6681 * @param cbToSub The number of bytes to subtract (8-bit!).
6682 */
6683DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6684{
6685 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6686 pCtx->rsp -= cbToSub;
6687 else if (pCtx->ss.Attr.n.u1DefBig)
6688 pCtx->esp -= cbToSub;
6689 else
6690 pCtx->sp -= cbToSub;
6691}
6692
6693
6694/**
6695 * Adds to the temporary stack pointer.
6696 *
6697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6698 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6699 * @param cbToAdd The number of bytes to add (16-bit).
6700 * @param pCtx Where to get the current stack mode.
6701 */
6702DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6703{
6704 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6705 pTmpRsp->u += cbToAdd;
6706 else if (pCtx->ss.Attr.n.u1DefBig)
6707 pTmpRsp->DWords.dw0 += cbToAdd;
6708 else
6709 pTmpRsp->Words.w0 += cbToAdd;
6710}
6711
6712
6713/**
6714 * Subtracts from the temporary stack pointer.
6715 *
6716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6717 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6718 * @param cbToSub The number of bytes to subtract.
6719 * @param pCtx Where to get the current stack mode.
6720 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6721 * expecting that.
6722 */
6723DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6724{
6725 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6726 pTmpRsp->u -= cbToSub;
6727 else if (pCtx->ss.Attr.n.u1DefBig)
6728 pTmpRsp->DWords.dw0 -= cbToSub;
6729 else
6730 pTmpRsp->Words.w0 -= cbToSub;
6731}
6732
6733
6734/**
6735 * Calculates the effective stack address for a push of the specified size as
6736 * well as the new RSP value (upper bits may be masked).
6737 *
6738 * @returns Effective stack address for the push.
6739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6740 * @param pCtx Where to get the current stack mode.
6741 * @param cbItem The size of the stack item to push.
6742 * @param puNewRsp Where to return the new RSP value.
6743 */
6744DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6745{
6746 RTUINT64U uTmpRsp;
6747 RTGCPTR GCPtrTop;
6748 uTmpRsp.u = pCtx->rsp;
6749
6750 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6751 GCPtrTop = uTmpRsp.u -= cbItem;
6752 else if (pCtx->ss.Attr.n.u1DefBig)
6753 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6754 else
6755 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6756 *puNewRsp = uTmpRsp.u;
6757 return GCPtrTop;
6758}
6759
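/*
 * Illustrative example (made-up values): with a 16-bit stack segment only the
 * low word of RSP takes part in the push, so for rsp = 0x00120002 and
 * cbItem = 4 the function returns GCPtrTop = 0xFFFE (SP wrapped around) and
 * *puNewRsp = 0x0012FFFE, leaving the upper bits of RSP untouched.
 */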
6760
6761/**
6762 * Gets the current stack pointer and calculates the value after a pop of the
6763 * specified size.
6764 *
6765 * @returns Current stack pointer.
6766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6767 * @param pCtx Where to get the current stack mode.
6768 * @param cbItem The size of the stack item to pop.
6769 * @param puNewRsp Where to return the new RSP value.
6770 */
6771DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6772{
6773 RTUINT64U uTmpRsp;
6774 RTGCPTR GCPtrTop;
6775 uTmpRsp.u = pCtx->rsp;
6776
6777 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6778 {
6779 GCPtrTop = uTmpRsp.u;
6780 uTmpRsp.u += cbItem;
6781 }
6782 else if (pCtx->ss.Attr.n.u1DefBig)
6783 {
6784 GCPtrTop = uTmpRsp.DWords.dw0;
6785 uTmpRsp.DWords.dw0 += cbItem;
6786 }
6787 else
6788 {
6789 GCPtrTop = uTmpRsp.Words.w0;
6790 uTmpRsp.Words.w0 += cbItem;
6791 }
6792 *puNewRsp = uTmpRsp.u;
6793 return GCPtrTop;
6794}
6795
6796
6797/**
6798 * Calculates the effective stack address for a push of the specified size as
6799 * well as the new temporary RSP value (upper bits may be masked).
6800 *
6801 * @returns Effective stack address for the push.
6802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6803 * @param pCtx Where to get the current stack mode.
6804 * @param pTmpRsp The temporary stack pointer. This is updated.
6805 * @param cbItem The size of the stack item to push.
6806 */
6807DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6808{
6809 RTGCPTR GCPtrTop;
6810
6811 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6812 GCPtrTop = pTmpRsp->u -= cbItem;
6813 else if (pCtx->ss.Attr.n.u1DefBig)
6814 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6815 else
6816 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6817 return GCPtrTop;
6818}
6819
6820
6821/**
6822 * Gets the effective stack address for a pop of the specified size and
6823 * calculates and updates the temporary RSP.
6824 *
6825 * @returns Current stack pointer.
6826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6827 * @param pCtx Where to get the current stack mode.
6828 * @param pTmpRsp The temporary stack pointer. This is updated.
6829 * @param cbItem The size of the stack item to pop.
6830 */
6831DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6832{
6833 RTGCPTR GCPtrTop;
6834 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6835 {
6836 GCPtrTop = pTmpRsp->u;
6837 pTmpRsp->u += cbItem;
6838 }
6839 else if (pCtx->ss.Attr.n.u1DefBig)
6840 {
6841 GCPtrTop = pTmpRsp->DWords.dw0;
6842 pTmpRsp->DWords.dw0 += cbItem;
6843 }
6844 else
6845 {
6846 GCPtrTop = pTmpRsp->Words.w0;
6847 pTmpRsp->Words.w0 += cbItem;
6848 }
6849 return GCPtrTop;
6850}
6851
6852/** @} */
6853
6854
6855/** @name FPU access and helpers.
6856 *
6857 * @{
6858 */
6859
6860
6861/**
6862 * Hook for preparing to use the host FPU.
6863 *
6864 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6865 *
6866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6867 */
6868DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6869{
6870#ifdef IN_RING3
6871 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6872#else
6873 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6874#endif
6875}
6876
6877
6878/**
6879 * Hook for preparing to use the host FPU for SSE.
6880 *
6881 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6882 *
6883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6884 */
6885DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6886{
6887 iemFpuPrepareUsage(pVCpu);
6888}
6889
6890
6891/**
6892 * Hook for preparing to use the host FPU for AVX.
6893 *
6894 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6895 *
6896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6897 */
6898DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6899{
6900 iemFpuPrepareUsage(pVCpu);
6901}
6902
6903
6904/**
6905 * Hook for actualizing the guest FPU state before the interpreter reads it.
6906 *
6907 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6908 *
6909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6910 */
6911DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6912{
6913#ifdef IN_RING3
6914 NOREF(pVCpu);
6915#else
6916 CPUMRZFpuStateActualizeForRead(pVCpu);
6917#endif
6918}
6919
6920
6921/**
6922 * Hook for actualizing the guest FPU state before the interpreter changes it.
6923 *
6924 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6925 *
6926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6927 */
6928DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6929{
6930#ifdef IN_RING3
6931 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6932#else
6933 CPUMRZFpuStateActualizeForChange(pVCpu);
6934#endif
6935}
6936
6937
6938/**
6939 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6940 * only.
6941 *
6942 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6943 *
6944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6945 */
6946DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6947{
6948#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6949 NOREF(pVCpu);
6950#else
6951 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6952#endif
6953}
6954
6955
6956/**
6957 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6958 * read+write.
6959 *
6960 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6961 *
6962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6963 */
6964DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6965{
6966#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6967 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6968#else
6969 CPUMRZFpuStateActualizeForChange(pVCpu);
6970#endif
6971}
6972
6973
6974/**
6975 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6976 * only.
6977 *
6978 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6979 *
6980 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6981 */
6982DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6983{
6984#ifdef IN_RING3
6985 NOREF(pVCpu);
6986#else
6987 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6988#endif
6989}
6990
6991
6992/**
6993 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6994 * read+write.
6995 *
6996 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6997 *
6998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6999 */
7000DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7001{
7002#ifdef IN_RING3
7003 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7004#else
7005 CPUMRZFpuStateActualizeForChange(pVCpu);
7006#endif
7007}
7008
7009
7010/**
7011 * Stores a QNaN value into a FPU register.
7012 *
7013 * @param pReg Pointer to the register.
7014 */
7015DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7016{
7017 pReg->au32[0] = UINT32_C(0x00000000);
7018 pReg->au32[1] = UINT32_C(0xc0000000);
7019 pReg->au16[4] = UINT16_C(0xffff);
7020}
7021
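/*
 * For reference, the pattern stored above is the x87 "QNaN floating-point
 * indefinite" value: sign = 1, exponent = 0x7fff and mantissa =
 * 0xc000000000000000, i.e. the 80-bit value ffff:c000000000000000.
 */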
7022
7023/**
7024 * Updates the FOP, FPU.CS and FPUIP registers.
7025 *
7026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7027 * @param pCtx The CPU context.
7028 * @param pFpuCtx The FPU context.
7029 */
7030DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
7031{
7032 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7033 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7034 /** @todo x87.CS and FPUIP need to be kept separately. */
7035 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7036 {
7037 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7038 * happens in real mode here based on the fnsave and fnstenv images. */
7039 pFpuCtx->CS = 0;
7040 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
7041 }
7042 else
7043 {
7044 pFpuCtx->CS = pCtx->cs.Sel;
7045 pFpuCtx->FPUIP = pCtx->rip;
7046 }
7047}
7048
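/*
 * Illustrative example (made-up values) for the real/V86 branch above: the
 * worker records a linear-style address instead of CS:IP, so with cs = 0x1234
 * and eip = 0x0010 the stored FPUIP becomes 0x0010 | (0x1234 << 4) = 0x12350
 * while CS is left at zero.
 */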
7049
7050/**
7051 * Updates the x87.DS and FPUDP registers.
7052 *
7053 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7054 * @param pCtx The CPU context.
7055 * @param pFpuCtx The FPU context.
7056 * @param iEffSeg The effective segment register.
7057 * @param GCPtrEff The effective address relative to @a iEffSeg.
7058 */
7059DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7060{
7061 RTSEL sel;
7062 switch (iEffSeg)
7063 {
7064 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
7065 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
7066 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
7067 case X86_SREG_ES: sel = pCtx->es.Sel; break;
7068 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
7069 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
7070 default:
7071 AssertMsgFailed(("%d\n", iEffSeg));
7072 sel = pCtx->ds.Sel;
7073 }
7074 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7075 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7076 {
7077 pFpuCtx->DS = 0;
7078 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7079 }
7080 else
7081 {
7082 pFpuCtx->DS = sel;
7083 pFpuCtx->FPUDP = GCPtrEff;
7084 }
7085}
7086
7087
7088/**
7089 * Rotates the stack registers in the push direction.
7090 *
7091 * @param pFpuCtx The FPU context.
7092 * @remarks This is a complete waste of time, but fxsave stores the registers in
7093 * stack order.
7094 */
7095DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7096{
7097 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7098 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7099 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7100 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7101 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7102 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7103 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7104 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7105 pFpuCtx->aRegs[0].r80 = r80Tmp;
7106}
7107
7108
7109/**
7110 * Rotates the stack registers in the pop direction.
7111 *
7112 * @param pFpuCtx The FPU context.
7113 * @remarks This is a complete waste of time, but fxsave stores the registers in
7114 * stack order.
7115 */
7116DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7117{
7118 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7119 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7120 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7121 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7122 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7123 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7124 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7125 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7126 pFpuCtx->aRegs[7].r80 = r80Tmp;
7127}
7128
7129
7130/**
7131 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7132 * exception prevents it.
7133 *
7134 * @param pResult The FPU operation result to push.
7135 * @param pFpuCtx The FPU context.
7136 */
7137IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7138{
7139 /* Update FSW and bail if there are pending exceptions afterwards. */
7140 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7141 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7142 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7143 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7144 {
7145 pFpuCtx->FSW = fFsw;
7146 return;
7147 }
7148
7149 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7150 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7151 {
7152 /* All is fine, push the actual value. */
7153 pFpuCtx->FTW |= RT_BIT(iNewTop);
7154 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7155 }
7156 else if (pFpuCtx->FCW & X86_FCW_IM)
7157 {
7158 /* Masked stack overflow, push QNaN. */
7159 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7160 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7161 }
7162 else
7163 {
7164 /* Raise stack overflow, don't push anything. */
7165 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7166 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7167 return;
7168 }
7169
7170 fFsw &= ~X86_FSW_TOP_MASK;
7171 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7172 pFpuCtx->FSW = fFsw;
7173
7174 iemFpuRotateStackPush(pFpuCtx);
7175}
7176
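/*
 * Worked example of the TOP arithmetic used above (values are made up): the
 * TOP field is only 3 bits wide and a push moves it down by one, so "TOP - 1"
 * is computed as "(TOP + 7) & 7" to get the modulo-8 wrap-around for free.
 */
#if 0 /* illustration only */
    uint16_t const fFswExample = 0;                                       /* TOP = 0 */
    uint16_t const iNewTopEx   = (X86_FSW_TOP_GET(fFswExample) + 7) & X86_FSW_TOP_SMASK;
    Assert(iNewTopEx == 7);                                               /* a push wraps TOP from 0 to 7 */
#endif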
7177
7178/**
7179 * Stores a result in a FPU register and updates the FSW and FTW.
7180 *
7181 * @param pFpuCtx The FPU context.
7182 * @param pResult The result to store.
7183 * @param iStReg Which FPU register to store it in.
7184 */
7185IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7186{
7187 Assert(iStReg < 8);
7188 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7189 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7190 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7191 pFpuCtx->FTW |= RT_BIT(iReg);
7192 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7193}
7194
7195
7196/**
7197 * Only updates the FPU status word (FSW) with the result of the current
7198 * instruction.
7199 *
7200 * @param pFpuCtx The FPU context.
7201 * @param u16FSW The FSW output of the current instruction.
7202 */
7203IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7204{
7205 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7206 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7207}
7208
7209
7210/**
7211 * Pops one item off the FPU stack if no pending exception prevents it.
7212 *
7213 * @param pFpuCtx The FPU context.
7214 */
7215IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7216{
7217 /* Check pending exceptions. */
7218 uint16_t uFSW = pFpuCtx->FSW;
7219 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7220 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7221 return;
7222
7223 /* TOP--. */
7224 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7225 uFSW &= ~X86_FSW_TOP_MASK;
7226 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7227 pFpuCtx->FSW = uFSW;
7228
7229 /* Mark the previous ST0 as empty. */
7230 iOldTop >>= X86_FSW_TOP_SHIFT;
7231 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7232
7233 /* Rotate the registers. */
7234 iemFpuRotateStackPop(pFpuCtx);
7235}
7236
7237
7238/**
7239 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7240 *
7241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7242 * @param pResult The FPU operation result to push.
7243 */
7244IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7245{
7246 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7247 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7248 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7249 iemFpuMaybePushResult(pResult, pFpuCtx);
7250}
7251
7252
7253/**
7254 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7255 * and sets FPUDP and FPUDS.
7256 *
7257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7258 * @param pResult The FPU operation result to push.
7259 * @param iEffSeg The effective segment register.
7260 * @param GCPtrEff The effective address relative to @a iEffSeg.
7261 */
7262IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7263{
7264 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7265 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7266 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7267 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7268 iemFpuMaybePushResult(pResult, pFpuCtx);
7269}
7270
7271
7272/**
7273 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7274 * unless a pending exception prevents it.
7275 *
7276 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7277 * @param pResult The FPU operation result to store and push.
7278 */
7279IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7280{
7281 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7282 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7283 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7284
7285 /* Update FSW and bail if there are pending exceptions afterwards. */
7286 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7287 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7288 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7289 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7290 {
7291 pFpuCtx->FSW = fFsw;
7292 return;
7293 }
7294
7295 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7296 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7297 {
7298 /* All is fine, push the actual value. */
7299 pFpuCtx->FTW |= RT_BIT(iNewTop);
7300 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7301 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7302 }
7303 else if (pFpuCtx->FCW & X86_FCW_IM)
7304 {
7305 /* Masked stack overflow, push QNaN. */
7306 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7307 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7308 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7309 }
7310 else
7311 {
7312 /* Raise stack overflow, don't push anything. */
7313 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7314 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7315 return;
7316 }
7317
7318 fFsw &= ~X86_FSW_TOP_MASK;
7319 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7320 pFpuCtx->FSW = fFsw;
7321
7322 iemFpuRotateStackPush(pFpuCtx);
7323}
7324
7325
7326/**
7327 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7328 * FOP.
7329 *
7330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7331 * @param pResult The result to store.
7332 * @param iStReg Which FPU register to store it in.
7333 */
7334IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7335{
7336 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7337 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7338 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7339 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7340}
7341
7342
7343/**
7344 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7345 * FOP, and then pops the stack.
7346 *
7347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7348 * @param pResult The result to store.
7349 * @param iStReg Which FPU register to store it in.
7350 */
7351IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7352{
7353 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7354 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7355 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7356 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7357 iemFpuMaybePopOne(pFpuCtx);
7358}
7359
7360
7361/**
7362 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7363 * FPUDP, and FPUDS.
7364 *
7365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7366 * @param pResult The result to store.
7367 * @param iStReg Which FPU register to store it in.
7368 * @param iEffSeg The effective memory operand selector register.
7369 * @param GCPtrEff The effective memory operand offset.
7370 */
7371IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7372 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7373{
7374 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7375 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7376 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7377 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7378 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7379}
7380
7381
7382/**
7383 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7384 * FPUDP, and FPUDS, and then pops the stack.
7385 *
7386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7387 * @param pResult The result to store.
7388 * @param iStReg Which FPU register to store it in.
7389 * @param iEffSeg The effective memory operand selector register.
7390 * @param GCPtrEff The effective memory operand offset.
7391 */
7392IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7393 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7394{
7395 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7396 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7397 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7398 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7399 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7400 iemFpuMaybePopOne(pFpuCtx);
7401}
7402
7403
7404/**
7405 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7406 *
7407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7408 */
7409IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7410{
7411 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7412 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7413 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7414}
7415
7416
7417/**
7418 * Marks the specified stack register as free (for FFREE).
7419 *
7420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7421 * @param iStReg The register to free.
7422 */
7423IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7424{
7425 Assert(iStReg < 8);
7426 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7427 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7428 pFpuCtx->FTW &= ~RT_BIT(iReg);
7429}
7430
7431
7432/**
7433 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7434 *
7435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7436 */
7437IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7438{
7439 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7440 uint16_t uFsw = pFpuCtx->FSW;
7441 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7442 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7443 uFsw &= ~X86_FSW_TOP_MASK;
7444 uFsw |= uTop;
7445 pFpuCtx->FSW = uFsw;
7446}
7447
7448
7449/**
7450 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7451 *
7452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7453 */
7454IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7455{
7456 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7457 uint16_t uFsw = pFpuCtx->FSW;
7458 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7459 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7460 uFsw &= ~X86_FSW_TOP_MASK;
7461 uFsw |= uTop;
7462 pFpuCtx->FSW = uFsw;
7463}
7464
7465
7466/**
7467 * Updates the FSW, FOP, FPUIP, and FPUCS.
7468 *
7469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7470 * @param u16FSW The FSW from the current instruction.
7471 */
7472IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7473{
7474 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7475 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7476 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7477 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7478}
7479
7480
7481/**
7482 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7483 *
7484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7485 * @param u16FSW The FSW from the current instruction.
7486 */
7487IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7488{
7489 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7490 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7491 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7492 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7493 iemFpuMaybePopOne(pFpuCtx);
7494}
7495
7496
7497/**
7498 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7499 *
7500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7501 * @param u16FSW The FSW from the current instruction.
7502 * @param iEffSeg The effective memory operand selector register.
7503 * @param GCPtrEff The effective memory operand offset.
7504 */
7505IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7506{
7507 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7508 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7509 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7510 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7511 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7512}
7513
7514
7515/**
7516 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7517 *
7518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7519 * @param u16FSW The FSW from the current instruction.
7520 */
7521IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7522{
7523 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7524 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7525 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7526 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7527 iemFpuMaybePopOne(pFpuCtx);
7528 iemFpuMaybePopOne(pFpuCtx);
7529}
7530
7531
7532/**
7533 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7534 *
7535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7536 * @param u16FSW The FSW from the current instruction.
7537 * @param iEffSeg The effective memory operand selector register.
7538 * @param GCPtrEff The effective memory operand offset.
7539 */
7540IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7541{
7542 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7543 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7544 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7545 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7546 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7547 iemFpuMaybePopOne(pFpuCtx);
7548}
7549
7550
7551/**
7552 * Worker routine for raising an FPU stack underflow exception.
7553 *
7554 * @param pFpuCtx The FPU context.
7555 * @param iStReg The stack register being accessed.
7556 */
7557IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7558{
7559 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7560 if (pFpuCtx->FCW & X86_FCW_IM)
7561 {
7562 /* Masked underflow. */
7563 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7564 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7565 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7566 if (iStReg != UINT8_MAX)
7567 {
7568 pFpuCtx->FTW |= RT_BIT(iReg);
7569 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7570 }
7571 }
7572 else
7573 {
7574 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7575 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7576 }
7577}
7578
7579
7580/**
7581 * Raises a FPU stack underflow exception.
7582 *
7583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7584 * @param iStReg The destination register that should be loaded
7585 * with QNaN if \#IS is not masked. Specify
7586 * UINT8_MAX if none (like for fcom).
7587 */
7588DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7589{
7590 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7591 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7592 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7593 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7594}
7595
7596
7597DECL_NO_INLINE(IEM_STATIC, void)
7598iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7599{
7600 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7601 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7602 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7603 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7604 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7605}
7606
7607
7608DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7609{
7610 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7611 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7612 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7613 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7614 iemFpuMaybePopOne(pFpuCtx);
7615}
7616
7617
7618DECL_NO_INLINE(IEM_STATIC, void)
7619iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7620{
7621 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7622 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7623 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7624 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7625 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7626 iemFpuMaybePopOne(pFpuCtx);
7627}
7628
7629
7630DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7631{
7632 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7633 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7634 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7635 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7636 iemFpuMaybePopOne(pFpuCtx);
7637 iemFpuMaybePopOne(pFpuCtx);
7638}
7639
7640
7641DECL_NO_INLINE(IEM_STATIC, void)
7642iemFpuStackPushUnderflow(PVMCPU pVCpu)
7643{
7644 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7645 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7646 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7647
7648 if (pFpuCtx->FCW & X86_FCW_IM)
7649 {
7650 /* Masked underflow - Push QNaN. */
7651 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7652 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7653 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7654 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7655 pFpuCtx->FTW |= RT_BIT(iNewTop);
7656 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7657 iemFpuRotateStackPush(pFpuCtx);
7658 }
7659 else
7660 {
7661 /* Exception pending - don't change TOP or the register stack. */
7662 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7663 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7664 }
7665}
7666
7667
7668DECL_NO_INLINE(IEM_STATIC, void)
7669iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7670{
7671 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7672 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7673 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7674
7675 if (pFpuCtx->FCW & X86_FCW_IM)
7676 {
7677 /* Masked underflow - Push QNaN. */
7678 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7679 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7680 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7681 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7682 pFpuCtx->FTW |= RT_BIT(iNewTop);
7683 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7684 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7685 iemFpuRotateStackPush(pFpuCtx);
7686 }
7687 else
7688 {
7689 /* Exception pending - don't change TOP or the register stack. */
7690 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7691 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7692 }
7693}
7694
7695
7696/**
7697 * Worker routine for raising an FPU stack overflow exception on a push.
7698 *
7699 * @param pFpuCtx The FPU context.
7700 */
7701IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7702{
7703 if (pFpuCtx->FCW & X86_FCW_IM)
7704 {
7705 /* Masked overflow. */
7706 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7707 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7708 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7709 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7710 pFpuCtx->FTW |= RT_BIT(iNewTop);
7711 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7712 iemFpuRotateStackPush(pFpuCtx);
7713 }
7714 else
7715 {
7716 /* Exception pending - don't change TOP or the register stack. */
7717 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7718 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7719 }
7720}
7721
7722
7723/**
7724 * Raises a FPU stack overflow exception on a push.
7725 *
7726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7727 */
7728DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7729{
7730 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7731 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7732 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7733 iemFpuStackPushOverflowOnly(pFpuCtx);
7734}
7735
7736
7737/**
7738 * Raises a FPU stack overflow exception on a push with a memory operand.
7739 *
7740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7741 * @param iEffSeg The effective memory operand selector register.
7742 * @param GCPtrEff The effective memory operand offset.
7743 */
7744DECL_NO_INLINE(IEM_STATIC, void)
7745iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7746{
7747 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7748 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7749 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7750 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7751 iemFpuStackPushOverflowOnly(pFpuCtx);
7752}
7753
7754
7755IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7756{
7757 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7758 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7759 if (pFpuCtx->FTW & RT_BIT(iReg))
7760 return VINF_SUCCESS;
7761 return VERR_NOT_FOUND;
7762}
7763
7764
7765IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7766{
7767 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7768 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7769 if (pFpuCtx->FTW & RT_BIT(iReg))
7770 {
7771 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7772 return VINF_SUCCESS;
7773 }
7774 return VERR_NOT_FOUND;
7775}
7776
7777
7778IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7779 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7780{
7781 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7782 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7783 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7784 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7785 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7786 {
7787 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7788 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7789 return VINF_SUCCESS;
7790 }
7791 return VERR_NOT_FOUND;
7792}
7793
7794
7795IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7796{
7797 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7798 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7799 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7800 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7801 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7802 {
7803 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7804 return VINF_SUCCESS;
7805 }
7806 return VERR_NOT_FOUND;
7807}
7808
7809
7810/**
7811 * Updates the FPU exception status after FCW is changed.
7812 *
7813 * @param pFpuCtx The FPU context.
7814 */
7815IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7816{
7817 uint16_t u16Fsw = pFpuCtx->FSW;
7818 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7819 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7820 else
7821 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7822 pFpuCtx->FSW = u16Fsw;
7823}
7824
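/*
 * Illustrative example (made-up values): if FSW has IE set while FCW.IM is
 * clear, the exception is unmasked and the recalculation above sets both
 * FSW.ES and FSW.B; with FCW.IM set the same IE bit is masked and ES and B
 * get cleared instead.
 */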
7825
7826/**
7827 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7828 *
7829 * @returns The full FTW.
7830 * @param pFpuCtx The FPU context.
7831 */
7832IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7833{
7834 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7835 uint16_t u16Ftw = 0;
7836 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7837 for (unsigned iSt = 0; iSt < 8; iSt++)
7838 {
7839 unsigned const iReg = (iSt + iTop) & 7;
7840 if (!(u8Ftw & RT_BIT(iReg)))
7841 u16Ftw |= 3 << (iReg * 2); /* empty */
7842 else
7843 {
7844 uint16_t uTag;
7845 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7846 if (pr80Reg->s.uExponent == 0x7fff)
7847 uTag = 2; /* Exponent is all 1's => Special. */
7848 else if (pr80Reg->s.uExponent == 0x0000)
7849 {
7850 if (pr80Reg->s.u64Mantissa == 0x0000)
7851 uTag = 1; /* All bits are zero => Zero. */
7852 else
7853 uTag = 2; /* Must be special. */
7854 }
7855 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7856 uTag = 0; /* Valid. */
7857 else
7858 uTag = 2; /* Must be special. */
7859
7860 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7861 }
7862 }
7863
7864 return u16Ftw;
7865}
7866
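/*
 * Tag encoding recap for the loop above: 00 = valid, 01 = zero, 10 = special
 * (NaN, infinity, denormal or unsupported) and 11 = empty.  For instance a
 * register holding +0.0 (exponent and mantissa all zero) is tagged 01, while
 * any register whose abridged FTW bit is clear is tagged 11.
 */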
7867
7868/**
7869 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7870 *
7871 * @returns The compressed FTW.
7872 * @param u16FullFtw The full FTW to convert.
7873 */
7874IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7875{
7876 uint8_t u8Ftw = 0;
7877 for (unsigned i = 0; i < 8; i++)
7878 {
7879 if ((u16FullFtw & 3) != 3 /*empty*/)
7880 u8Ftw |= RT_BIT(i);
7881 u16FullFtw >>= 2;
7882 }
7883
7884 return u8Ftw;
7885}
7886
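/*
 * Illustrative example (made-up value): a full FTW of 0xfffe - physical
 * register 0 tagged 10 (special), registers 1 thru 7 tagged 11 (empty) -
 * compresses to 0x01, since only non-empty registers get their bit set in
 * the abridged form.
 */
#if 0 /* illustration only */
    Assert(iemFpuCompressFtw(UINT16_C(0xfffe)) == UINT16_C(0x0001));
#endif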
7887/** @} */
7888
7889
7890/** @name Memory access.
7891 *
7892 * @{
7893 */
7894
7895
7896/**
7897 * Updates the IEMCPU::cbWritten counter if applicable.
7898 *
7899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7900 * @param fAccess The access being accounted for.
7901 * @param cbMem The access size.
7902 */
7903DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7904{
7905 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7906 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7907 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7908}
7909
7910
7911/**
7912 * Checks if the given segment can be written to, raising the appropriate
7913 * exception if not.
7914 *
7915 * @returns VBox strict status code.
7916 *
7917 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7918 * @param pHid Pointer to the hidden register.
7919 * @param iSegReg The register number.
7920 * @param pu64BaseAddr Where to return the base address to use for the
7921 * segment. (In 64-bit code it may differ from the
7922 * base in the hidden segment.)
7923 */
7924IEM_STATIC VBOXSTRICTRC
7925iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7926{
7927 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7928 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7929 else
7930 {
7931 if (!pHid->Attr.n.u1Present)
7932 {
7933 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7934 AssertRelease(uSel == 0);
7935 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7936 return iemRaiseGeneralProtectionFault0(pVCpu);
7937 }
7938
7939 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7940 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7941 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7942 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7943 *pu64BaseAddr = pHid->u64Base;
7944 }
7945 return VINF_SUCCESS;
7946}
7947
7948
7949/**
7950 * Checks if the given segment can be read from, raising the appropriate
7951 * exception if not.
7952 *
7953 * @returns VBox strict status code.
7954 *
7955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7956 * @param pHid Pointer to the hidden register.
7957 * @param iSegReg The register number.
7958 * @param pu64BaseAddr Where to return the base address to use for the
7959 * segment. (In 64-bit code it may differ from the
7960 * base in the hidden segment.)
7961 */
7962IEM_STATIC VBOXSTRICTRC
7963iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7964{
7965 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7966 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7967 else
7968 {
7969 if (!pHid->Attr.n.u1Present)
7970 {
7971 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7972 AssertRelease(uSel == 0);
7973 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7974 return iemRaiseGeneralProtectionFault0(pVCpu);
7975 }
7976
7977 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7978 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7979 *pu64BaseAddr = pHid->u64Base;
7980 }
7981 return VINF_SUCCESS;
7982}
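
/*
 * A minimal sketch of the descriptor type checks performed by the two helpers
 * above (outside 64-bit mode, where they are skipped).  The MYSEL_TYPE_*
 * values follow the standard x86 type-field layout (bit 3 = code, bit 1 =
 * writable for data / readable for code); canWrite() and canRead() are
 * illustration-only names.
 */
#if 0 /* standalone illustration, not compiled into IEM */
#include <stdbool.h>
#include <assert.h>

#define MYSEL_TYPE_CODE  0x8
#define MYSEL_TYPE_WRITE 0x2   /* data segments */
#define MYSEL_TYPE_READ  0x2   /* code segments */

static bool canWrite(unsigned u4Type)  /* cf. iemMemSegCheckWriteAccessEx */
{
    return !(u4Type & MYSEL_TYPE_CODE) && (u4Type & MYSEL_TYPE_WRITE);
}

static bool canRead(unsigned u4Type)   /* cf. iemMemSegCheckReadAccessEx */
{
    return (u4Type & (MYSEL_TYPE_CODE | MYSEL_TYPE_READ)) != MYSEL_TYPE_CODE;
}

int main(void)
{
    assert( canWrite(0x3));   /* accessed read/write data   */
    assert(!canWrite(0x1));   /* accessed read-only data    */
    assert(!canWrite(0xb));   /* accessed execute/read code */
    assert( canRead(0xb));    /* accessed execute/read code */
    assert(!canRead(0x9));    /* accessed execute-only code */
    return 0;
}
#endif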
7983
7984
7985/**
7986 * Applies the segment limit, base and attributes.
7987 *
7988 * This may raise a \#GP or \#SS.
7989 *
7990 * @returns VBox strict status code.
7991 *
7992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7993 * @param fAccess The kind of access which is being performed.
7994 * @param iSegReg The index of the segment register to apply.
7995 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7996 * TSS, ++).
7997 * @param cbMem The access size.
7998 * @param pGCPtrMem Pointer to the guest memory address to apply
7999 * segmentation to. Input and output parameter.
8000 */
8001IEM_STATIC VBOXSTRICTRC
8002iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8003{
8004 if (iSegReg == UINT8_MAX)
8005 return VINF_SUCCESS;
8006
8007 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8008 switch (pVCpu->iem.s.enmCpuMode)
8009 {
8010 case IEMMODE_16BIT:
8011 case IEMMODE_32BIT:
8012 {
8013 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8014 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8015
8016 if ( pSel->Attr.n.u1Present
8017 && !pSel->Attr.n.u1Unusable)
8018 {
8019 Assert(pSel->Attr.n.u1DescType);
8020 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8021 {
8022 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8023 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8024 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8025
8026 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8027 {
8028 /** @todo CPL check. */
8029 }
8030
8031 /*
8032 * There are two kinds of data selectors, normal and expand down.
8033 */
8034 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8035 {
8036 if ( GCPtrFirst32 > pSel->u32Limit
8037 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8038 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8039 }
8040 else
8041 {
8042 /*
8043 * The upper boundary is defined by the B bit, not the G bit!
8044 */
8045 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8046 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8047 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8048 }
8049 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8050 }
8051 else
8052 {
8053
8054 /*
8055 * Code selectors can usually be read through (when readable); writing is
8056 * only permitted in real and V8086 mode.
8057 */
8058 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8059 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8060 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8061 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8062 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8063
8064 if ( GCPtrFirst32 > pSel->u32Limit
8065 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8066 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8067
8068 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8069 {
8070 /** @todo CPL check. */
8071 }
8072
8073 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8074 }
8075 }
8076 else
8077 return iemRaiseGeneralProtectionFault0(pVCpu);
8078 return VINF_SUCCESS;
8079 }
8080
8081 case IEMMODE_64BIT:
8082 {
8083 RTGCPTR GCPtrMem = *pGCPtrMem;
8084 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8085 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8086
8087 Assert(cbMem >= 1);
8088 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8089 return VINF_SUCCESS;
8090 return iemRaiseGeneralProtectionFault0(pVCpu);
8091 }
8092
8093 default:
8094 AssertFailedReturn(VERR_IEM_IPE_7);
8095 }
8096}
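
/*
 * A standalone sketch of the two 16/32-bit limit checks above; expandUpOk()
 * and expandDownOk() are illustration-only names.  For an expand-up segment
 * every accessed byte must lie in [0, limit]; for an expand-down segment the
 * valid offsets are (limit, top], where the top is 0xffffffff when the B bit
 * is set and 0xffff otherwise.
 */
#if 0 /* standalone illustration, not compiled into IEM */
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

static bool expandUpOk(uint32_t first, uint32_t cb, uint32_t limit)
{
    uint32_t const last = first + cb - 1;
    return first <= limit && last <= limit;
}

static bool expandDownOk(uint32_t first, uint32_t cb, uint32_t limit, bool fBigSeg)
{
    uint32_t const last = first + cb - 1;
    uint32_t const top  = fBigSeg ? UINT32_MAX : UINT32_C(0xffff);
    return first > limit && last <= top;
}

int main(void)
{
    assert( expandUpOk(0x0ffc, 4, 0x0fff));          /* exactly fills the segment  */
    assert(!expandUpOk(0x0ffd, 4, 0x0fff));          /* last byte beyond the limit */
    assert( expandDownOk(0x2000, 4, 0x1fff, true));  /* above the limit: fine      */
    assert(!expandDownOk(0x1000, 4, 0x1fff, true));  /* inside the hole: fault     */
    return 0;
}
#endif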
8097
8098
8099/**
8100 * Translates a virtual address to a physical address and checks if we
8101 * can access the page as specified.
8102 *
8103 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8104 * @param GCPtrMem The virtual address.
8105 * @param fAccess The intended access.
8106 * @param pGCPhysMem Where to return the physical address.
8107 */
8108IEM_STATIC VBOXSTRICTRC
8109iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8110{
8111 /** @todo Need a different PGM interface here. We're currently using
8112 * generic / REM interfaces. This won't cut it for R0 & RC. */
8113 RTGCPHYS GCPhys;
8114 uint64_t fFlags;
8115 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8116 if (RT_FAILURE(rc))
8117 {
8118 /** @todo Check unassigned memory in unpaged mode. */
8119 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8120 *pGCPhysMem = NIL_RTGCPHYS;
8121 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8122 }
8123
8124 /* If the page is writable, user accessible and does not have the no-exec bit
8125 set, all access is allowed. Otherwise we'll have to check more carefully... */
8126 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8127 {
8128 /* Write to read only memory? */
8129 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8130 && !(fFlags & X86_PTE_RW)
8131 && ( (pVCpu->iem.s.uCpl == 3
8132 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8133 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8134 {
8135 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8136 *pGCPhysMem = NIL_RTGCPHYS;
8137 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8138 }
8139
8140 /* Kernel memory accessed by userland? */
8141 if ( !(fFlags & X86_PTE_US)
8142 && pVCpu->iem.s.uCpl == 3
8143 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8144 {
8145 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8146 *pGCPhysMem = NIL_RTGCPHYS;
8147 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8148 }
8149
8150 /* Executing non-executable memory? */
8151 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8152 && (fFlags & X86_PTE_PAE_NX)
8153 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8154 {
8155 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8156 *pGCPhysMem = NIL_RTGCPHYS;
8157 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8158 VERR_ACCESS_DENIED);
8159 }
8160 }
8161
8162 /*
8163 * Set the dirty / access flags.
8164 * ASSUMES these are set when the address is translated rather than on commit...
8165 */
8166 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8167 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8168 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8169 {
8170 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8171 AssertRC(rc2);
8172 }
8173
8174 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8175 *pGCPhysMem = GCPhys;
8176 return VINF_SUCCESS;
8177}
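
/*
 * A condensed standalone sketch of the three #PF conditions tested above for
 * an ordinary (non-IEM_ACCESS_WHAT_SYS) access.  MYPTE_* and pageAccessOk()
 * are illustration-only stand-ins for the X86_PTE_* flags and the inline
 * checks in iemMemPageTranslateAndCheckAccess.
 */
#if 0 /* standalone illustration, not compiled into IEM */
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

#define MYPTE_RW UINT64_C(0x0000000000000002)   /* writable        */
#define MYPTE_US UINT64_C(0x0000000000000004)   /* user accessible */
#define MYPTE_NX UINT64_C(0x8000000000000000)   /* no-execute      */

static bool pageAccessOk(uint64_t fPte, bool fWrite, bool fExec, unsigned uCpl, bool fCr0Wp, bool fNxe)
{
    if (fWrite && !(fPte & MYPTE_RW) && (uCpl == 3 || fCr0Wp))
        return false;                           /* write to a read-only page         */
    if (uCpl == 3 && !(fPte & MYPTE_US))
        return false;                           /* user-mode access to a kernel page */
    if (fExec && (fPte & MYPTE_NX) && fNxe)
        return false;                           /* instruction fetch from an NX page */
    return true;
}

int main(void)
{
    /* Ring-0 may write a read-only page as long as CR0.WP is clear. */
    assert( pageAccessOk(MYPTE_US, true, false, 0, false, true));
    assert(!pageAccessOk(MYPTE_US, true, false, 0, true,  true));
    /* Ring-3 touching a supervisor page always faults. */
    assert(!pageAccessOk(MYPTE_RW, false, false, 3, true, true));
    /* NX only matters when EFER.NXE is set. */
    assert( pageAccessOk(MYPTE_RW | MYPTE_US | MYPTE_NX, false, true, 3, true, false));
    return 0;
}
#endif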
8178
8179
8180
8181/**
8182 * Maps a physical page.
8183 *
8184 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8185 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8186 * @param GCPhysMem The physical address.
8187 * @param fAccess The intended access.
8188 * @param ppvMem Where to return the mapping address.
8189 * @param pLock The PGM lock.
8190 */
8191IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8192{
8193#ifdef IEM_VERIFICATION_MODE_FULL
8194 /* Force the alternative path so we can ignore writes. */
8195 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8196 {
8197 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8198 {
8199 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8200 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8201 if (RT_FAILURE(rc2))
8202 pVCpu->iem.s.fProblematicMemory = true;
8203 }
8204 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8205 }
8206#endif
8207#ifdef IEM_LOG_MEMORY_WRITES
8208 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8209 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8210#endif
8211#ifdef IEM_VERIFICATION_MODE_MINIMAL
8212 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8213#endif
8214
8215 /** @todo This API may require some improving later. A private deal with PGM
8216 * regarding locking and unlocking needs to be struck. A couple of TLBs
8217 * living in PGM, but with publicly accessible inlined access methods
8218 * could perhaps be an even better solution. */
8219 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8220 GCPhysMem,
8221 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8222 pVCpu->iem.s.fBypassHandlers,
8223 ppvMem,
8224 pLock);
8225 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8226 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8227
8228#ifdef IEM_VERIFICATION_MODE_FULL
8229 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8230 pVCpu->iem.s.fProblematicMemory = true;
8231#endif
8232 return rc;
8233}
8234
8235
8236/**
8237 * Unmap a page previously mapped by iemMemPageMap.
8238 *
8239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8240 * @param GCPhysMem The physical address.
8241 * @param fAccess The intended access.
8242 * @param pvMem What iemMemPageMap returned.
8243 * @param pLock The PGM lock.
8244 */
8245DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8246{
8247 NOREF(pVCpu);
8248 NOREF(GCPhysMem);
8249 NOREF(fAccess);
8250 NOREF(pvMem);
8251 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8252}
8253
8254
8255/**
8256 * Looks up a memory mapping entry.
8257 *
8258 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8260 * @param pvMem The memory address.
8261 * @param fAccess The kind of access to look up.
8262 */
8263DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8264{
8265 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8266 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8267 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8268 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8269 return 0;
8270 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8271 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8272 return 1;
8273 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8274 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8275 return 2;
8276 return VERR_NOT_FOUND;
8277}
8278
8279
8280/**
8281 * Finds a free memmap entry when iNextMapping cannot be used directly.
8282 *
8283 * @returns Memory mapping index, 1024 on failure.
8284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8285 */
8286IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8287{
8288 /*
8289 * The easy case.
8290 */
8291 if (pVCpu->iem.s.cActiveMappings == 0)
8292 {
8293 pVCpu->iem.s.iNextMapping = 1;
8294 return 0;
8295 }
8296
8297 /* There should be enough mappings for all instructions. */
8298 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8299
8300 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8301 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8302 return i;
8303
8304 AssertFailedReturn(1024);
8305}
8306
8307
8308/**
8309 * Commits a bounce buffer that needs writing back and unmaps it.
8310 *
8311 * @returns Strict VBox status code.
8312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8313 * @param iMemMap The index of the buffer to commit.
8314 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8315 * Always false in ring-3, obviously.
8316 */
8317IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8318{
8319 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8320 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8321#ifdef IN_RING3
8322 Assert(!fPostponeFail);
8323 RT_NOREF_PV(fPostponeFail);
8324#endif
8325
8326 /*
8327 * Do the writing.
8328 */
8329#ifndef IEM_VERIFICATION_MODE_MINIMAL
8330 PVM pVM = pVCpu->CTX_SUFF(pVM);
8331 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8332 && !IEM_VERIFICATION_ENABLED(pVCpu))
8333 {
8334 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8335 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8336 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8337 if (!pVCpu->iem.s.fBypassHandlers)
8338 {
8339 /*
8340 * Carefully and efficiently dealing with access handler return
8341 * codes makes this a little bloated.
8342 */
8343 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8344 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8345 pbBuf,
8346 cbFirst,
8347 PGMACCESSORIGIN_IEM);
8348 if (rcStrict == VINF_SUCCESS)
8349 {
8350 if (cbSecond)
8351 {
8352 rcStrict = PGMPhysWrite(pVM,
8353 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8354 pbBuf + cbFirst,
8355 cbSecond,
8356 PGMACCESSORIGIN_IEM);
8357 if (rcStrict == VINF_SUCCESS)
8358 { /* nothing */ }
8359 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8360 {
8361 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8362 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8363 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8364 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8365 }
8366# ifndef IN_RING3
8367 else if (fPostponeFail)
8368 {
8369 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8370 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8371 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8372 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8373 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8374 return iemSetPassUpStatus(pVCpu, rcStrict);
8375 }
8376# endif
8377 else
8378 {
8379 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8380 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8381 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8382 return rcStrict;
8383 }
8384 }
8385 }
8386 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8387 {
8388 if (!cbSecond)
8389 {
8390 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8391 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8392 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8393 }
8394 else
8395 {
8396 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8397 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8398 pbBuf + cbFirst,
8399 cbSecond,
8400 PGMACCESSORIGIN_IEM);
8401 if (rcStrict2 == VINF_SUCCESS)
8402 {
8403 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8404 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8405 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8406 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8407 }
8408 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8409 {
8410 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8411 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8412 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8413 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8414 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8415 }
8416# ifndef IN_RING3
8417 else if (fPostponeFail)
8418 {
8419 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8420 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8421 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8422 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8423 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8424 return iemSetPassUpStatus(pVCpu, rcStrict);
8425 }
8426# endif
8427 else
8428 {
8429 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8430 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8431 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8432 return rcStrict2;
8433 }
8434 }
8435 }
8436# ifndef IN_RING3
8437 else if (fPostponeFail)
8438 {
8439 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8440 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8441 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8442 if (!cbSecond)
8443 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8444 else
8445 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8446 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8447 return iemSetPassUpStatus(pVCpu, rcStrict);
8448 }
8449# endif
8450 else
8451 {
8452 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8453 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8454 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8455 return rcStrict;
8456 }
8457 }
8458 else
8459 {
8460 /*
8461 * No access handlers, much simpler.
8462 */
8463 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8464 if (RT_SUCCESS(rc))
8465 {
8466 if (cbSecond)
8467 {
8468 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8469 if (RT_SUCCESS(rc))
8470 { /* likely */ }
8471 else
8472 {
8473 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8474 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8475 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8476 return rc;
8477 }
8478 }
8479 }
8480 else
8481 {
8482 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8483 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8484 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8485 return rc;
8486 }
8487 }
8488 }
8489#endif
8490
8491#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8492 /*
8493 * Record the write(s).
8494 */
8495 if (!pVCpu->iem.s.fNoRem)
8496 {
8497 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8498 if (pEvtRec)
8499 {
8500 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8501 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8502 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8503 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8504 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8505 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8506 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8507 }
8508 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8509 {
8510 pEvtRec = iemVerifyAllocRecord(pVCpu);
8511 if (pEvtRec)
8512 {
8513 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8514 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8515 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8516 memcpy(pEvtRec->u.RamWrite.ab,
8517 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8518 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8519 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8520 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8521 }
8522 }
8523 }
8524#endif
8525#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8526 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8527 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8528 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8529 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8530 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8531 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8532
8533 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8534 g_cbIemWrote = cbWrote;
8535 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8536#endif
8537
8538 /*
8539 * Free the mapping entry.
8540 */
8541 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8542 Assert(pVCpu->iem.s.cActiveMappings != 0);
8543 pVCpu->iem.s.cActiveMappings--;
8544 return VINF_SUCCESS;
8545}
8546
8547
8548/**
8549 * iemMemMap worker that deals with a request crossing pages.
8550 */
8551IEM_STATIC VBOXSTRICTRC
8552iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8553{
8554 /*
8555 * Do the address translations.
8556 */
8557 RTGCPHYS GCPhysFirst;
8558 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8559 if (rcStrict != VINF_SUCCESS)
8560 return rcStrict;
8561
8562 RTGCPHYS GCPhysSecond;
8563 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8564 fAccess, &GCPhysSecond);
8565 if (rcStrict != VINF_SUCCESS)
8566 return rcStrict;
8567 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8568
8569 PVM pVM = pVCpu->CTX_SUFF(pVM);
8570#ifdef IEM_VERIFICATION_MODE_FULL
8571 /*
8572 * Detect problematic memory when verifying so we can select
8573 * the right execution engine. (TLB: Redo this.)
8574 */
8575 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8576 {
8577 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8578 if (RT_SUCCESS(rc2))
8579 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8580 if (RT_FAILURE(rc2))
8581 pVCpu->iem.s.fProblematicMemory = true;
8582 }
8583#endif
8584
8585
8586 /*
8587 * Read in the current memory content if it's a read, execute or partial
8588 * write access.
8589 */
8590 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8591 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8592 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8593
8594 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8595 {
8596 if (!pVCpu->iem.s.fBypassHandlers)
8597 {
8598 /*
8599 * Must carefully deal with access handler status codes here,
8600 * which makes the code a bit bloated.
8601 */
8602 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8603 if (rcStrict == VINF_SUCCESS)
8604 {
8605 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8606 if (rcStrict == VINF_SUCCESS)
8607 { /*likely */ }
8608 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8609 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8610 else
8611 {
8612 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8613 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8614 return rcStrict;
8615 }
8616 }
8617 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8618 {
8619 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8620 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8621 {
8622 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8623 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8624 }
8625 else
8626 {
8627 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8628 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8629 return rcStrict2;
8630 }
8631 }
8632 else
8633 {
8634 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8635 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8636 return rcStrict;
8637 }
8638 }
8639 else
8640 {
8641 /*
8642 * No informational status codes here, much more straightforward.
8643 */
8644 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8645 if (RT_SUCCESS(rc))
8646 {
8647 Assert(rc == VINF_SUCCESS);
8648 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8649 if (RT_SUCCESS(rc))
8650 Assert(rc == VINF_SUCCESS);
8651 else
8652 {
8653 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8654 return rc;
8655 }
8656 }
8657 else
8658 {
8659 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8660 return rc;
8661 }
8662 }
8663
8664#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8665 if ( !pVCpu->iem.s.fNoRem
8666 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8667 {
8668 /*
8669 * Record the reads.
8670 */
8671 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8672 if (pEvtRec)
8673 {
8674 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8675 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8676 pEvtRec->u.RamRead.cb = cbFirstPage;
8677 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8678 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8679 }
8680 pEvtRec = iemVerifyAllocRecord(pVCpu);
8681 if (pEvtRec)
8682 {
8683 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8684 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8685 pEvtRec->u.RamRead.cb = cbSecondPage;
8686 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8687 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8688 }
8689 }
8690#endif
8691 }
8692#ifdef VBOX_STRICT
8693 else
8694 memset(pbBuf, 0xcc, cbMem);
8695 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8696 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8697#endif
8698
8699 /*
8700 * Commit the bounce buffer entry.
8701 */
8702 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8703 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8704 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8705 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8706 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8707 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8708 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8709 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8710 pVCpu->iem.s.cActiveMappings++;
8711
8712 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8713 *ppvMem = pbBuf;
8714 return VINF_SUCCESS;
8715}
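
/*
 * A tiny standalone sketch of the split arithmetic used above (the function is
 * only entered for accesses that really do cross a page boundary): cbFirstPage
 * is whatever is left on the first page and the remainder lands at the start
 * of the second page.  MY_PAGE_* and splitAccess() are illustration-only
 * stand-ins.
 */
#if 0 /* standalone illustration, not compiled into IEM */
#include <stdint.h>
#include <assert.h>

#define MY_PAGE_SIZE        UINT32_C(0x1000)
#define MY_PAGE_OFFSET_MASK UINT32_C(0x0fff)

static void splitAccess(uint64_t GCPtrFirst, uint32_t cbMem, uint32_t *pcbFirst, uint32_t *pcbSecond)
{
    *pcbFirst  = MY_PAGE_SIZE - (uint32_t)(GCPtrFirst & MY_PAGE_OFFSET_MASK);
    *pcbSecond = cbMem - *pcbFirst;
}

int main(void)
{
    uint32_t cbFirst, cbSecond;

    /* A 4-byte read ending 2 bytes into the next page: 2 + 2 bytes. */
    splitAccess(UINT64_C(0x00007ffc00000ffe), 4, &cbFirst, &cbSecond);
    assert(cbFirst == 2 && cbSecond == 2);

    /* A 10-byte FPU store starting at the very last byte of a page: 1 + 9 bytes. */
    splitAccess(UINT64_C(0x0fff), 10, &cbFirst, &cbSecond);
    assert(cbFirst == 1 && cbSecond == 9);
    return 0;
}
#endif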
8716
8717
8718/**
8719 * iemMemMap worker that deals with iemMemPageMap failures.
8720 */
8721IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8722 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8723{
8724 /*
8725 * Filter out conditions we can handle and the ones which shouldn't happen.
8726 */
8727 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8728 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8729 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8730 {
8731 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8732 return rcMap;
8733 }
8734 pVCpu->iem.s.cPotentialExits++;
8735
8736 /*
8737 * Read in the current memory content if it's a read, execute or partial
8738 * write access.
8739 */
8740 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8741 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8742 {
8743 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8744 memset(pbBuf, 0xff, cbMem);
8745 else
8746 {
8747 int rc;
8748 if (!pVCpu->iem.s.fBypassHandlers)
8749 {
8750 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8751 if (rcStrict == VINF_SUCCESS)
8752 { /* nothing */ }
8753 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8754 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8755 else
8756 {
8757 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8758 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8759 return rcStrict;
8760 }
8761 }
8762 else
8763 {
8764 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8765 if (RT_SUCCESS(rc))
8766 { /* likely */ }
8767 else
8768 {
8769 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
8770 GCPhysFirst, rc));
8771 return rc;
8772 }
8773 }
8774 }
8775
8776#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8777 if ( !pVCpu->iem.s.fNoRem
8778 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8779 {
8780 /*
8781 * Record the read.
8782 */
8783 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8784 if (pEvtRec)
8785 {
8786 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8787 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8788 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8789 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8790 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8791 }
8792 }
8793#endif
8794 }
8795#ifdef VBOX_STRICT
8796 else
8797 memset(pbBuf, 0xcc, cbMem);
8798#endif
8799#ifdef VBOX_STRICT
8800 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8801 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8802#endif
8803
8804 /*
8805 * Commit the bounce buffer entry.
8806 */
8807 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8808 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8809 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8810 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8811 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8812 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8813 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8814 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8815 pVCpu->iem.s.cActiveMappings++;
8816
8817 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8818 *ppvMem = pbBuf;
8819 return VINF_SUCCESS;
8820}
8821
8822
8823
8824/**
8825 * Maps the specified guest memory for the given kind of access.
8826 *
8827 * This may be using bounce buffering of the memory if it's crossing a page
8828 * boundary or if there is an access handler installed for any of it. Because
8829 * of lock prefix guarantees, we're in for some extra clutter when this
8830 * happens.
8831 *
8832 * This may raise a \#GP, \#SS, \#PF or \#AC.
8833 *
8834 * @returns VBox strict status code.
8835 *
8836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8837 * @param ppvMem Where to return the pointer to the mapped
8838 * memory.
8839 * @param cbMem The number of bytes to map. This is usually 1,
8840 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8841 * string operations it can be up to a page.
8842 * @param iSegReg The index of the segment register to use for
8843 * this access. The base and limits are checked.
8844 * Use UINT8_MAX to indicate that no segmentation
8845 * is required (for IDT, GDT and LDT accesses).
8846 * @param GCPtrMem The address of the guest memory.
8847 * @param fAccess How the memory is being accessed. The
8848 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8849 * how to map the memory, while the
8850 * IEM_ACCESS_WHAT_XXX bit is used when raising
8851 * exceptions.
8852 */
8853IEM_STATIC VBOXSTRICTRC
8854iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8855{
8856 /*
8857 * Check the input and figure out which mapping entry to use.
8858 */
8859 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8860 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8861 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8862
8863 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8864 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8865 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8866 {
8867 iMemMap = iemMemMapFindFree(pVCpu);
8868 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8869 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8870 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8871 pVCpu->iem.s.aMemMappings[2].fAccess),
8872 VERR_IEM_IPE_9);
8873 }
8874
8875 /*
8876 * Map the memory, checking that we can actually access it. If something
8877 * slightly complicated happens, fall back on bounce buffering.
8878 */
8879 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8880 if (rcStrict != VINF_SUCCESS)
8881 return rcStrict;
8882
8883 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8884 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8885
8886 RTGCPHYS GCPhysFirst;
8887 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8888 if (rcStrict != VINF_SUCCESS)
8889 return rcStrict;
8890
8891 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8892 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8893 if (fAccess & IEM_ACCESS_TYPE_READ)
8894 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8895
8896 void *pvMem;
8897 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8898 if (rcStrict != VINF_SUCCESS)
8899 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8900
8901 /*
8902 * Fill in the mapping table entry.
8903 */
8904 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8905 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8906 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8907 pVCpu->iem.s.cActiveMappings++;
8908
8909 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8910 *ppvMem = pvMem;
8911 return VINF_SUCCESS;
8912}
8913
8914
8915/**
8916 * Commits the guest memory if bounce buffered and unmaps it.
8917 *
8918 * @returns Strict VBox status code.
8919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8920 * @param pvMem The mapping.
8921 * @param fAccess The kind of access.
8922 */
8923IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8924{
8925 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8926 AssertReturn(iMemMap >= 0, iMemMap);
8927
8928 /* If it's bounce buffered, we may need to write back the buffer. */
8929 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8930 {
8931 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8932 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8933 }
8934 /* Otherwise unlock it. */
8935 else
8936 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8937
8938 /* Free the entry. */
8939 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8940 Assert(pVCpu->iem.s.cActiveMappings != 0);
8941 pVCpu->iem.s.cActiveMappings--;
8942 return VINF_SUCCESS;
8943}
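
/*
 * To illustrate the intended calling pattern for iemMemMap and
 * iemMemCommitAndUnmap, here is a hypothetical read-modify-write helper in the
 * same style as the fetch/store helpers further down.  It is a sketch only:
 * the helper name is made up, and IEM_ACCESS_DATA_RW is assumed to combine the
 * data read and write access bits the way the other IEM_ACCESS_DATA_* values do.
 */
#if 0 /* illustration only, not part of the IEM API */
IEM_STATIC VBOXSTRICTRC
iemMemXchgDataU16Sketch(PVMCPU pVCpu, uint16_t *pu16Old, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16New)
{
    /* Map two bytes for read+write; this may bounce buffer and may raise #GP, #SS, #PF or #AC. */
    uint16_t *pu16Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_RW);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu16Old = *pu16Dst;    /* read the old value  */
        *pu16Dst = u16New;      /* write the new value */
        /* Commit (writes back the bounce buffer if one was used) and unmap. */
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_RW);
    }
    return rcStrict;
}
#endif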
8944
8945#ifdef IEM_WITH_SETJMP
8946
8947/**
8948 * Maps the specified guest memory for the given kind of access, longjmp on
8949 * error.
8950 *
8951 * This may be using bounce buffering of the memory if it's crossing a page
8952 * boundary or if there is an access handler installed for any of it. Because
8953 * of lock prefix guarantees, we're in for some extra clutter when this
8954 * happens.
8955 *
8956 * This may raise a \#GP, \#SS, \#PF or \#AC.
8957 *
8958 * @returns Pointer to the mapped memory.
8959 *
8960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8961 * @param cbMem The number of bytes to map. This is usually 1,
8962 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8963 * string operations it can be up to a page.
8964 * @param iSegReg The index of the segment register to use for
8965 * this access. The base and limits are checked.
8966 * Use UINT8_MAX to indicate that no segmentation
8967 * is required (for IDT, GDT and LDT accesses).
8968 * @param GCPtrMem The address of the guest memory.
8969 * @param fAccess How the memory is being accessed. The
8970 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8971 * how to map the memory, while the
8972 * IEM_ACCESS_WHAT_XXX bit is used when raising
8973 * exceptions.
8974 */
8975IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8976{
8977 /*
8978 * Check the input and figure out which mapping entry to use.
8979 */
8980 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8981 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8982 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8983
8984 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8985 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8986 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8987 {
8988 iMemMap = iemMemMapFindFree(pVCpu);
8989 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8990 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8991 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8992 pVCpu->iem.s.aMemMappings[2].fAccess),
8993 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8994 }
8995
8996 /*
8997 * Map the memory, checking that we can actually access it. If something
8998 * slightly complicated happens, fall back on bounce buffering.
8999 */
9000 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9001 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9002 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9003
9004 /* Crossing a page boundary? */
9005 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
9006 { /* No (likely). */ }
9007 else
9008 {
9009 void *pvMem;
9010 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9011 if (rcStrict == VINF_SUCCESS)
9012 return pvMem;
9013 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9014 }
9015
9016 RTGCPHYS GCPhysFirst;
9017 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9018 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9019 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9020
9021 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9022 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9023 if (fAccess & IEM_ACCESS_TYPE_READ)
9024 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9025
9026 void *pvMem;
9027 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9028 if (rcStrict == VINF_SUCCESS)
9029 { /* likely */ }
9030 else
9031 {
9032 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9033 if (rcStrict == VINF_SUCCESS)
9034 return pvMem;
9035 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9036 }
9037
9038 /*
9039 * Fill in the mapping table entry.
9040 */
9041 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9042 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9043 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9044 pVCpu->iem.s.cActiveMappings++;
9045
9046 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9047 return pvMem;
9048}
9049
9050
9051/**
9052 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9053 *
9054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9055 * @param pvMem The mapping.
9056 * @param fAccess The kind of access.
9057 */
9058IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9059{
9060 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9061 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9062
9063 /* If it's bounce buffered, we may need to write back the buffer. */
9064 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9065 {
9066 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9067 {
9068 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9069 if (rcStrict == VINF_SUCCESS)
9070 return;
9071 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9072 }
9073 }
9074 /* Otherwise unlock it. */
9075 else
9076 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9077
9078 /* Free the entry. */
9079 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9080 Assert(pVCpu->iem.s.cActiveMappings != 0);
9081 pVCpu->iem.s.cActiveMappings--;
9082}
9083
9084#endif
9085
9086#ifndef IN_RING3
9087/**
9088 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9089 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
9090 *
9091 * Allows the instruction to be completed and retired, while the IEM user will
9092 * return to ring-3 immediately afterwards and do the postponed writes there.
9093 *
9094 * @returns VBox status code (no strict statuses). Caller must check
9095 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9097 * @param pvMem The mapping.
9098 * @param fAccess The kind of access.
9099 */
9100IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9101{
9102 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9103 AssertReturn(iMemMap >= 0, iMemMap);
9104
9105 /* If it's bounce buffered, we may need to write back the buffer. */
9106 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9107 {
9108 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9109 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9110 }
9111 /* Otherwise unlock it. */
9112 else
9113 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9114
9115 /* Free the entry. */
9116 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9117 Assert(pVCpu->iem.s.cActiveMappings != 0);
9118 pVCpu->iem.s.cActiveMappings--;
9119 return VINF_SUCCESS;
9120}
9121#endif
9122
9123
9124/**
9125 * Rolls back mappings, releasing page locks and such.
9126 *
9127 * The caller shall only call this after checking cActiveMappings.
9128 *
9130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9131 */
9132IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9133{
9134 Assert(pVCpu->iem.s.cActiveMappings > 0);
9135
9136 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9137 while (iMemMap-- > 0)
9138 {
9139 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9140 if (fAccess != IEM_ACCESS_INVALID)
9141 {
9142 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9143 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9144 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9145 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9146 Assert(pVCpu->iem.s.cActiveMappings > 0);
9147 pVCpu->iem.s.cActiveMappings--;
9148 }
9149 }
9150}
9151
9152
9153/**
9154 * Fetches a data byte.
9155 *
9156 * @returns Strict VBox status code.
9157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9158 * @param pu8Dst Where to return the byte.
9159 * @param iSegReg The index of the segment register to use for
9160 * this access. The base and limits are checked.
9161 * @param GCPtrMem The address of the guest memory.
9162 */
9163IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9164{
9165 /* The lazy approach for now... */
9166 uint8_t const *pu8Src;
9167 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9168 if (rc == VINF_SUCCESS)
9169 {
9170 *pu8Dst = *pu8Src;
9171 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9172 }
9173 return rc;
9174}
9175
9176
9177#ifdef IEM_WITH_SETJMP
9178/**
9179 * Fetches a data byte, longjmp on error.
9180 *
9181 * @returns The byte.
9182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9183 * @param iSegReg The index of the segment register to use for
9184 * this access. The base and limits are checked.
9185 * @param GCPtrMem The address of the guest memory.
9186 */
9187DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9188{
9189 /* The lazy approach for now... */
9190 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9191 uint8_t const bRet = *pu8Src;
9192 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9193 return bRet;
9194}
9195#endif /* IEM_WITH_SETJMP */
9196
9197
9198/**
9199 * Fetches a data word.
9200 *
9201 * @returns Strict VBox status code.
9202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9203 * @param pu16Dst Where to return the word.
9204 * @param iSegReg The index of the segment register to use for
9205 * this access. The base and limits are checked.
9206 * @param GCPtrMem The address of the guest memory.
9207 */
9208IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9209{
9210 /* The lazy approach for now... */
9211 uint16_t const *pu16Src;
9212 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9213 if (rc == VINF_SUCCESS)
9214 {
9215 *pu16Dst = *pu16Src;
9216 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9217 }
9218 return rc;
9219}
9220
9221
9222#ifdef IEM_WITH_SETJMP
9223/**
9224 * Fetches a data word, longjmp on error.
9225 *
9226 * @returns The word
9227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9228 * @param iSegReg The index of the segment register to use for
9229 * this access. The base and limits are checked.
9230 * @param GCPtrMem The address of the guest memory.
9231 */
9232DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9233{
9234 /* The lazy approach for now... */
9235 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9236 uint16_t const u16Ret = *pu16Src;
9237 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9238 return u16Ret;
9239}
9240#endif
9241
9242
9243/**
9244 * Fetches a data dword.
9245 *
9246 * @returns Strict VBox status code.
9247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9248 * @param pu32Dst Where to return the dword.
9249 * @param iSegReg The index of the segment register to use for
9250 * this access. The base and limits are checked.
9251 * @param GCPtrMem The address of the guest memory.
9252 */
9253IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9254{
9255 /* The lazy approach for now... */
9256 uint32_t const *pu32Src;
9257 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9258 if (rc == VINF_SUCCESS)
9259 {
9260 *pu32Dst = *pu32Src;
9261 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9262 }
9263 return rc;
9264}
9265
9266
9267#ifdef IEM_WITH_SETJMP
9268
9269IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9270{
9271 Assert(cbMem >= 1);
9272 Assert(iSegReg < X86_SREG_COUNT);
9273
9274 /*
9275 * 64-bit mode is simpler.
9276 */
9277 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9278 {
9279 if (iSegReg >= X86_SREG_FS)
9280 {
9281 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9282 GCPtrMem += pSel->u64Base;
9283 }
9284
9285 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9286 return GCPtrMem;
9287 }
9288 /*
9289 * 16-bit and 32-bit segmentation.
9290 */
9291 else
9292 {
9293 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9294 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9295 == X86DESCATTR_P /* data, expand up */
9296 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9297 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9298 {
9299 /* expand up */
9300 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9301 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9302 && GCPtrLast32 > (uint32_t)GCPtrMem))
9303 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9304 }
9305 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9306 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9307 {
9308 /* expand down */
9309 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9310 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9311 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9312 && GCPtrLast32 > (uint32_t)GCPtrMem))
9313 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9314 }
9315 else
9316 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9317 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9318 }
9319 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9320}
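
/*
 * A standalone sketch of the canonical-address requirement enforced by the
 * 64-bit branch above: bits 63:47 must all equal bit 47, i.e. the address
 * sign-extends from bit 47 on a 48-bit implementation.  isCanonicalSketch()
 * is an illustration-only stand-in for X86_IS_CANONICAL.
 */
#if 0 /* standalone illustration, not compiled into IEM */
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

static bool isCanonicalSketch(uint64_t uAddr)
{
    uint64_t const uTop = uAddr >> 47;  /* bits 63:47, 17 bits in total */
    return uTop == 0 || uTop == UINT64_C(0x1ffff);
}

int main(void)
{
    assert( isCanonicalSketch(UINT64_C(0x00007fffffffffff)));  /* top of the lower half    */
    assert( isCanonicalSketch(UINT64_C(0xffff800000000000)));  /* bottom of the upper half */
    assert(!isCanonicalSketch(UINT64_C(0x0000800000000000)));  /* inside the hole: #GP     */
    return 0;
}
#endif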
9321
9322
9323IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9324{
9325 Assert(cbMem >= 1);
9326 Assert(iSegReg < X86_SREG_COUNT);
9327
9328 /*
9329 * 64-bit mode is simpler.
9330 */
9331 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9332 {
9333 if (iSegReg >= X86_SREG_FS)
9334 {
9335 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9336 GCPtrMem += pSel->u64Base;
9337 }
9338
9339 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9340 return GCPtrMem;
9341 }
9342 /*
9343 * 16-bit and 32-bit segmentation.
9344 */
9345 else
9346 {
9347 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9348 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9349 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9350 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9351 {
9352 /* expand up */
9353 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9354 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9355 && GCPtrLast32 > (uint32_t)GCPtrMem))
9356 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9357 }
9358 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9359 {
9360 /* expand down */
9361 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9362 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9363 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9364 && GCPtrLast32 > (uint32_t)GCPtrMem))
9365 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9366 }
9367 else
9368 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9369 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9370 }
9371 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9372}
9373
9374
9375/**
9376 * Fetches a data dword, longjmp on error, fallback/safe version.
9377 *
9378 * @returns The dword
9379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9380 * @param iSegReg The index of the segment register to use for
9381 * this access. The base and limits are checked.
9382 * @param GCPtrMem The address of the guest memory.
9383 */
9384IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9385{
9386 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9387 uint32_t const u32Ret = *pu32Src;
9388 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9389 return u32Ret;
9390}
9391
9392
9393/**
9394 * Fetches a data dword, longjmp on error.
9395 *
9396 * @returns The dword
9397 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9398 * @param iSegReg The index of the segment register to use for
9399 * this access. The base and limits are checked.
9400 * @param GCPtrMem The address of the guest memory.
9401 */
9402DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9403{
9404# ifdef IEM_WITH_DATA_TLB
9405 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9406 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9407 {
9408 /// @todo more later.
9409 }
9410
9411 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9412# else
9413 /* The lazy approach. */
9414 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9415 uint32_t const u32Ret = *pu32Src;
9416 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9417 return u32Ret;
9418# endif
9419}
9420#endif
9421
9422
9423#ifdef SOME_UNUSED_FUNCTION
9424/**
9425 * Fetches a data dword and sign extends it to a qword.
9426 *
9427 * @returns Strict VBox status code.
9428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9429 * @param pu64Dst Where to return the sign extended value.
9430 * @param iSegReg The index of the segment register to use for
9431 * this access. The base and limits are checked.
9432 * @param GCPtrMem The address of the guest memory.
9433 */
9434IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9435{
9436 /* The lazy approach for now... */
9437 int32_t const *pi32Src;
9438 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9439 if (rc == VINF_SUCCESS)
9440 {
9441 *pu64Dst = *pi32Src;
9442 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9443 }
9444#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9445 else
9446 *pu64Dst = 0;
9447#endif
9448 return rc;
9449}
9450#endif
9451
9452
9453/**
9454 * Fetches a data qword.
9455 *
9456 * @returns Strict VBox status code.
9457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9458 * @param pu64Dst Where to return the qword.
9459 * @param iSegReg The index of the segment register to use for
9460 * this access. The base and limits are checked.
9461 * @param GCPtrMem The address of the guest memory.
9462 */
9463IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9464{
9465 /* The lazy approach for now... */
9466 uint64_t const *pu64Src;
9467 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9468 if (rc == VINF_SUCCESS)
9469 {
9470 *pu64Dst = *pu64Src;
9471 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9472 }
9473 return rc;
9474}
9475
9476
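/*
 * A minimal usage sketch (hypothetical helper, not part of the original source,
 * excluded from the build): how the strict-status fetch helpers above are
 * typically consumed.  The helper name and the DS-relative access are made up;
 * the point is the pattern of fetching, checking the VBOXSTRICTRC and
 * propagating anything that is not VINF_SUCCESS.
 */
#if 0 /* illustration only, not built */
IEM_STATIC VBOXSTRICTRC iemExampleLoadAndDouble(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint64_t *pu64Result)
{
    uint64_t u64Value = 0;
    VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &u64Value, X86_SREG_DS, GCPtrMem);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict; /* #GP/#SS/#PF or an informational status from the mapping layer. */
    *pu64Result = u64Value * 2;
    return VINF_SUCCESS;
}
#endif
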
9477#ifdef IEM_WITH_SETJMP
9478/**
9479 * Fetches a data qword, longjmp on error.
9480 *
9481 * @returns The qword.
9482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9483 * @param iSegReg The index of the segment register to use for
9484 * this access. The base and limits are checked.
9485 * @param GCPtrMem The address of the guest memory.
9486 */
9487DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9488{
9489 /* The lazy approach for now... */
9490 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9491 uint64_t const u64Ret = *pu64Src;
9492 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9493 return u64Ret;
9494}
9495#endif
9496
9497
9498/**
9499 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9500 *
9501 * @returns Strict VBox status code.
9502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9503 * @param pu64Dst Where to return the qword.
9504 * @param iSegReg The index of the segment register to use for
9505 * this access. The base and limits are checked.
9506 * @param GCPtrMem The address of the guest memory.
9507 */
9508IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9509{
9510 /* The lazy approach for now... */
9511 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9512 if (RT_UNLIKELY(GCPtrMem & 15))
9513 return iemRaiseGeneralProtectionFault0(pVCpu);
9514
9515 uint64_t const *pu64Src;
9516 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9517 if (rc == VINF_SUCCESS)
9518 {
9519 *pu64Dst = *pu64Src;
9520 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9521 }
9522 return rc;
9523}
9524
9525
9526#ifdef IEM_WITH_SETJMP
9527/**
9528 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9529 *
9530 * @returns The qword.
9531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9532 * @param iSegReg The index of the segment register to use for
9533 * this access. The base and limits are checked.
9534 * @param GCPtrMem The address of the guest memory.
9535 */
9536DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9537{
9538 /* The lazy approach for now... */
9539 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9540 if (RT_LIKELY(!(GCPtrMem & 15)))
9541 {
9542 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9543 uint64_t const u64Ret = *pu64Src;
9544 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9545 return u64Ret;
9546 }
9547
9548 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9549 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9550}
9551#endif
9552
9553
9554/**
9555 * Fetches a data tword.
9556 *
9557 * @returns Strict VBox status code.
9558 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9559 * @param pr80Dst Where to return the tword.
9560 * @param iSegReg The index of the segment register to use for
9561 * this access. The base and limits are checked.
9562 * @param GCPtrMem The address of the guest memory.
9563 */
9564IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9565{
9566 /* The lazy approach for now... */
9567 PCRTFLOAT80U pr80Src;
9568 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9569 if (rc == VINF_SUCCESS)
9570 {
9571 *pr80Dst = *pr80Src;
9572 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9573 }
9574 return rc;
9575}
9576
9577
9578#ifdef IEM_WITH_SETJMP
9579/**
9580 * Fetches a data tword, longjmp on error.
9581 *
9582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9583 * @param pr80Dst Where to return the tword.
9584 * @param iSegReg The index of the segment register to use for
9585 * this access. The base and limits are checked.
9586 * @param GCPtrMem The address of the guest memory.
9587 */
9588DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9589{
9590 /* The lazy approach for now... */
9591 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9592 *pr80Dst = *pr80Src;
9593 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9594}
9595#endif
9596
9597
9598/**
9599 * Fetches a data dqword (double qword), generally SSE related.
9600 *
9601 * @returns Strict VBox status code.
9602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9603 * @param pu128Dst Where to return the dqword.
9604 * @param iSegReg The index of the segment register to use for
9605 * this access. The base and limits are checked.
9606 * @param GCPtrMem The address of the guest memory.
9607 */
9608IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9609{
9610 /* The lazy approach for now... */
9611 PCRTUINT128U pu128Src;
9612 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9613 if (rc == VINF_SUCCESS)
9614 {
9615 pu128Dst->au64[0] = pu128Src->au64[0];
9616 pu128Dst->au64[1] = pu128Src->au64[1];
9617 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9618 }
9619 return rc;
9620}
9621
9622
9623#ifdef IEM_WITH_SETJMP
9624/**
9625 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9626 *
9627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9628 * @param pu128Dst Where to return the dqword.
9629 * @param iSegReg The index of the segment register to use for
9630 * this access. The base and limits are checked.
9631 * @param GCPtrMem The address of the guest memory.
9632 */
9633IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9634{
9635 /* The lazy approach for now... */
9636 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9637 pu128Dst->au64[0] = pu128Src->au64[0];
9638 pu128Dst->au64[1] = pu128Src->au64[1];
9639 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9640}
9641#endif
9642
9643
9644/**
9645 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9646 * related.
9647 *
9648 * Raises \#GP(0) if not aligned.
9649 *
9650 * @returns Strict VBox status code.
9651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9652 * @param pu128Dst Where to return the dqword.
9653 * @param iSegReg The index of the segment register to use for
9654 * this access. The base and limits are checked.
9655 * @param GCPtrMem The address of the guest memory.
9656 */
9657IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9658{
9659 /* The lazy approach for now... */
9660 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9661 if ( (GCPtrMem & 15)
9662 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9663 return iemRaiseGeneralProtectionFault0(pVCpu);
9664
9665 PCRTUINT128U pu128Src;
9666 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9667 if (rc == VINF_SUCCESS)
9668 {
9669 pu128Dst->au64[0] = pu128Src->au64[0];
9670 pu128Dst->au64[1] = pu128Src->au64[1];
9671 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9672 }
9673 return rc;
9674}
9675
9676
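/*
 * A sketch (hypothetical predicate, not part of the original source, excluded
 * from the build) of the alignment rule enforced by
 * iemMemFetchDataU128AlignedSse() above: a misaligned 16-byte access is only
 * tolerated when the guest has MXCSR.MM set, otherwise #GP(0) is raised before
 * any mapping is attempted.
 */
#if 0 /* illustration only, not built */
IEM_STATIC bool iemExampleIsSseAlignmentOk(PVMCPU pVCpu, RTGCPTR GCPtrMem)
{
    if (!(GCPtrMem & 15))
        return true; /* naturally aligned on a 16 byte boundary */
    /* Misaligned: acceptable only with the misaligned-exception-mask bit set. */
    return RT_BOOL(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM);
}
#endif
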
9677#ifdef IEM_WITH_SETJMP
9678/**
9679 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9680 * related, longjmp on error.
9681 *
9682 * Raises \#GP(0) if not aligned.
9683 *
9684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9685 * @param pu128Dst Where to return the dqword.
9686 * @param iSegReg The index of the segment register to use for
9687 * this access. The base and limits are checked.
9688 * @param GCPtrMem The address of the guest memory.
9689 */
9690DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9691{
9692 /* The lazy approach for now... */
9693 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9694 if ( (GCPtrMem & 15) == 0
9695 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9696 {
9697 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9698 pu128Dst->au64[0] = pu128Src->au64[0];
9699 pu128Dst->au64[1] = pu128Src->au64[1];
9700 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9701 return;
9702 }
9703
9704 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9705 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9706}
9707#endif
9708
9709
9710/**
9711 * Fetches a data oword (octo word), generally AVX related.
9712 *
9713 * @returns Strict VBox status code.
9714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9715 * @param pu256Dst Where to return the oword.
9716 * @param iSegReg The index of the segment register to use for
9717 * this access. The base and limits are checked.
9718 * @param GCPtrMem The address of the guest memory.
9719 */
9720IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9721{
9722 /* The lazy approach for now... */
9723 PCRTUINT256U pu256Src;
9724 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9725 if (rc == VINF_SUCCESS)
9726 {
9727 pu256Dst->au64[0] = pu256Src->au64[0];
9728 pu256Dst->au64[1] = pu256Src->au64[1];
9729 pu256Dst->au64[2] = pu256Src->au64[2];
9730 pu256Dst->au64[3] = pu256Src->au64[3];
9731 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9732 }
9733 return rc;
9734}
9735
9736
9737#ifdef IEM_WITH_SETJMP
9738/**
9739 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9740 *
9741 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9742 * @param pu256Dst Where to return the oword.
9743 * @param iSegReg The index of the segment register to use for
9744 * this access. The base and limits are checked.
9745 * @param GCPtrMem The address of the guest memory.
9746 */
9747IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9748{
9749 /* The lazy approach for now... */
9750 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9751 pu256Dst->au64[0] = pu256Src->au64[0];
9752 pu256Dst->au64[1] = pu256Src->au64[1];
9753 pu256Dst->au64[2] = pu256Src->au64[2];
9754 pu256Dst->au64[3] = pu256Src->au64[3];
9755 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9756}
9757#endif
9758
9759
9760/**
9761 * Fetches a data oword (octo word) at an aligned address, generally AVX
9762 * related.
9763 *
9764 * Raises \#GP(0) if not aligned.
9765 *
9766 * @returns Strict VBox status code.
9767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9768 * @param pu256Dst Where to return the oword.
9769 * @param iSegReg The index of the segment register to use for
9770 * this access. The base and limits are checked.
9771 * @param GCPtrMem The address of the guest memory.
9772 */
9773IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9774{
9775 /* The lazy approach for now... */
9776 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9777 if (GCPtrMem & 31)
9778 return iemRaiseGeneralProtectionFault0(pVCpu);
9779
9780 PCRTUINT256U pu256Src;
9781 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9782 if (rc == VINF_SUCCESS)
9783 {
9784 pu256Dst->au64[0] = pu256Src->au64[0];
9785 pu256Dst->au64[1] = pu256Src->au64[1];
9786 pu256Dst->au64[2] = pu256Src->au64[2];
9787 pu256Dst->au64[3] = pu256Src->au64[3];
9788 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9789 }
9790 return rc;
9791}
9792
9793
9794#ifdef IEM_WITH_SETJMP
9795/**
9796 * Fetches a data oword (octo word) at an aligned address, generally AVX
9797 * related, longjmp on error.
9798 *
9799 * Raises \#GP(0) if not aligned.
9800 *
9801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9802 * @param pu256Dst Where to return the oword.
9803 * @param iSegReg The index of the segment register to use for
9804 * this access. The base and limits are checked.
9805 * @param GCPtrMem The address of the guest memory.
9806 */
9807DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9808{
9809 /* The lazy approach for now... */
9810 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9811 if ((GCPtrMem & 31) == 0)
9812 {
9813 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9814 pu256Dst->au64[0] = pu256Src->au64[0];
9815 pu256Dst->au64[1] = pu256Src->au64[1];
9816 pu256Dst->au64[2] = pu256Src->au64[2];
9817 pu256Dst->au64[3] = pu256Src->au64[3];
9818 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9819 return;
9820 }
9821
9822 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9823 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9824}
9825#endif
9826
9827
9828
9829/**
9830 * Fetches a descriptor register (lgdt, lidt).
9831 *
9832 * @returns Strict VBox status code.
9833 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9834 * @param pcbLimit Where to return the limit.
9835 * @param pGCPtrBase Where to return the base.
9836 * @param iSegReg The index of the segment register to use for
9837 * this access. The base and limits are checked.
9838 * @param GCPtrMem The address of the guest memory.
9839 * @param enmOpSize The effective operand size.
9840 */
9841IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9842 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9843{
9844 /*
9845 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9846 * little special:
9847 * - The two reads are done separately.
9848     * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9849 * - We suspect the 386 to actually commit the limit before the base in
9850 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9851     * don't try to emulate this eccentric behavior, because it's not well
9852 * enough understood and rather hard to trigger.
9853 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9854 */
9855 VBOXSTRICTRC rcStrict;
9856 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9857 {
9858 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9859 if (rcStrict == VINF_SUCCESS)
9860 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9861 }
9862 else
9863 {
9864 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
9865 if (enmOpSize == IEMMODE_32BIT)
9866 {
9867 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9868 {
9869 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9870 if (rcStrict == VINF_SUCCESS)
9871 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9872 }
9873 else
9874 {
9875 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9876 if (rcStrict == VINF_SUCCESS)
9877 {
9878 *pcbLimit = (uint16_t)uTmp;
9879 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9880 }
9881 }
9882 if (rcStrict == VINF_SUCCESS)
9883 *pGCPtrBase = uTmp;
9884 }
9885 else
9886 {
9887 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9888 if (rcStrict == VINF_SUCCESS)
9889 {
9890 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9891 if (rcStrict == VINF_SUCCESS)
9892 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9893 }
9894 }
9895 }
9896 return rcStrict;
9897}
9898
9899
9900
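/*
 * A usage sketch (hypothetical caller, not part of the original source,
 * excluded from the build): how an lgdt-style implementation might consume
 * iemMemFetchDataXdtr().  It illustrates the pseudo-descriptor split described
 * above: a 16-bit limit at offset 0 and a base at offset 2 whose width follows
 * the CPU mode / operand size, masked down to 24 bits for the 16-bit operand
 * size case.
 */
#if 0 /* illustration only, not built */
IEM_STATIC VBOXSTRICTRC iemExampleReadXdtrOperand(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
{
    uint16_t cbLimit   = 0;
    RTGCPTR  GCPtrBase = 0;
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iSegReg, GCPtrMem, enmOpSize);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    Log(("example: limit=%#x base=%RGv\n", cbLimit, GCPtrBase));
    return VINF_SUCCESS;
}
#endif
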
9901/**
9902 * Stores a data byte.
9903 *
9904 * @returns Strict VBox status code.
9905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9906 * @param iSegReg The index of the segment register to use for
9907 * this access. The base and limits are checked.
9908 * @param GCPtrMem The address of the guest memory.
9909 * @param u8Value The value to store.
9910 */
9911IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9912{
9913 /* The lazy approach for now... */
9914 uint8_t *pu8Dst;
9915 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9916 if (rc == VINF_SUCCESS)
9917 {
9918 *pu8Dst = u8Value;
9919 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9920 }
9921 return rc;
9922}
9923
9924
9925#ifdef IEM_WITH_SETJMP
9926/**
9927 * Stores a data byte, longjmp on error.
9928 *
9929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9930 * @param iSegReg The index of the segment register to use for
9931 * this access. The base and limits are checked.
9932 * @param GCPtrMem The address of the guest memory.
9933 * @param u8Value The value to store.
9934 */
9935IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9936{
9937 /* The lazy approach for now... */
9938 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9939 *pu8Dst = u8Value;
9940 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9941}
9942#endif
9943
9944
9945/**
9946 * Stores a data word.
9947 *
9948 * @returns Strict VBox status code.
9949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9950 * @param iSegReg The index of the segment register to use for
9951 * this access. The base and limits are checked.
9952 * @param GCPtrMem The address of the guest memory.
9953 * @param u16Value The value to store.
9954 */
9955IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9956{
9957 /* The lazy approach for now... */
9958 uint16_t *pu16Dst;
9959 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9960 if (rc == VINF_SUCCESS)
9961 {
9962 *pu16Dst = u16Value;
9963 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9964 }
9965 return rc;
9966}
9967
9968
9969#ifdef IEM_WITH_SETJMP
9970/**
9971 * Stores a data word, longjmp on error.
9972 *
9973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9974 * @param iSegReg The index of the segment register to use for
9975 * this access. The base and limits are checked.
9976 * @param GCPtrMem The address of the guest memory.
9977 * @param u16Value The value to store.
9978 */
9979IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9980{
9981 /* The lazy approach for now... */
9982 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9983 *pu16Dst = u16Value;
9984 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9985}
9986#endif
9987
9988
9989/**
9990 * Stores a data dword.
9991 *
9992 * @returns Strict VBox status code.
9993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9994 * @param iSegReg The index of the segment register to use for
9995 * this access. The base and limits are checked.
9996 * @param GCPtrMem The address of the guest memory.
9997 * @param u32Value The value to store.
9998 */
9999IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10000{
10001 /* The lazy approach for now... */
10002 uint32_t *pu32Dst;
10003 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10004 if (rc == VINF_SUCCESS)
10005 {
10006 *pu32Dst = u32Value;
10007 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10008 }
10009 return rc;
10010}
10011
10012
10013#ifdef IEM_WITH_SETJMP
10014/**
10015 * Stores a data dword, longjmp on error.
10016 *
10018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10019 * @param iSegReg The index of the segment register to use for
10020 * this access. The base and limits are checked.
10021 * @param GCPtrMem The address of the guest memory.
10022 * @param u32Value The value to store.
10023 */
10024IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10025{
10026 /* The lazy approach for now... */
10027 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10028 *pu32Dst = u32Value;
10029 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10030}
10031#endif
10032
10033
10034/**
10035 * Stores a data qword.
10036 *
10037 * @returns Strict VBox status code.
10038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10039 * @param iSegReg The index of the segment register to use for
10040 * this access. The base and limits are checked.
10041 * @param GCPtrMem The address of the guest memory.
10042 * @param u64Value The value to store.
10043 */
10044IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10045{
10046 /* The lazy approach for now... */
10047 uint64_t *pu64Dst;
10048 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10049 if (rc == VINF_SUCCESS)
10050 {
10051 *pu64Dst = u64Value;
10052 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10053 }
10054 return rc;
10055}
10056
10057
10058#ifdef IEM_WITH_SETJMP
10059/**
10060 * Stores a data qword, longjmp on error.
10061 *
10062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10063 * @param iSegReg The index of the segment register to use for
10064 * this access. The base and limits are checked.
10065 * @param GCPtrMem The address of the guest memory.
10066 * @param u64Value The value to store.
10067 */
10068IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10069{
10070 /* The lazy approach for now... */
10071 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10072 *pu64Dst = u64Value;
10073 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10074}
10075#endif
10076
10077
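/*
 * A sketch (hypothetical helpers, not part of the original source, excluded
 * from the build) showing the two store flavours side by side.  The plain
 * helpers return a strict status that every caller must check and propagate,
 * while the *Jmp variants (only compiled with IEM_WITH_SETJMP) never return on
 * failure but longjmp to the instruction dispatcher, so callers can be written
 * as straight-line code.
 */
#if 0 /* illustration only, not built */
IEM_STATIC VBOXSTRICTRC iemExampleStorePair(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t u32Lo, uint32_t u32Hi)
{
    VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, X86_SREG_DS, GCPtrMem, u32Lo);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStoreDataU32(pVCpu, X86_SREG_DS, GCPtrMem + 4, u32Hi);
    return rcStrict;
}

# ifdef IEM_WITH_SETJMP
IEM_STATIC void iemExampleStorePairJmp(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t u32Lo, uint32_t u32Hi)
{
    /* Errors unwind via the jump buffer, so there is no status plumbing here. */
    iemMemStoreDataU32Jmp(pVCpu, X86_SREG_DS, GCPtrMem,     u32Lo);
    iemMemStoreDataU32Jmp(pVCpu, X86_SREG_DS, GCPtrMem + 4, u32Hi);
}
# endif
#endif
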
10078/**
10079 * Stores a data dqword.
10080 *
10081 * @returns Strict VBox status code.
10082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10083 * @param iSegReg The index of the segment register to use for
10084 * this access. The base and limits are checked.
10085 * @param GCPtrMem The address of the guest memory.
10086 * @param u128Value The value to store.
10087 */
10088IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10089{
10090 /* The lazy approach for now... */
10091 PRTUINT128U pu128Dst;
10092 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10093 if (rc == VINF_SUCCESS)
10094 {
10095 pu128Dst->au64[0] = u128Value.au64[0];
10096 pu128Dst->au64[1] = u128Value.au64[1];
10097 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10098 }
10099 return rc;
10100}
10101
10102
10103#ifdef IEM_WITH_SETJMP
10104/**
10105 * Stores a data dqword, longjmp on error.
10106 *
10107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10108 * @param iSegReg The index of the segment register to use for
10109 * this access. The base and limits are checked.
10110 * @param GCPtrMem The address of the guest memory.
10111 * @param u128Value The value to store.
10112 */
10113IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10114{
10115 /* The lazy approach for now... */
10116 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10117 pu128Dst->au64[0] = u128Value.au64[0];
10118 pu128Dst->au64[1] = u128Value.au64[1];
10119 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10120}
10121#endif
10122
10123
10124/**
10125 * Stores a data dqword, SSE aligned.
10126 *
10127 * @returns Strict VBox status code.
10128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10129 * @param iSegReg The index of the segment register to use for
10130 * this access. The base and limits are checked.
10131 * @param GCPtrMem The address of the guest memory.
10132 * @param u128Value The value to store.
10133 */
10134IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10135{
10136 /* The lazy approach for now... */
10137 if ( (GCPtrMem & 15)
10138 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10139 return iemRaiseGeneralProtectionFault0(pVCpu);
10140
10141 PRTUINT128U pu128Dst;
10142 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10143 if (rc == VINF_SUCCESS)
10144 {
10145 pu128Dst->au64[0] = u128Value.au64[0];
10146 pu128Dst->au64[1] = u128Value.au64[1];
10147 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10148 }
10149 return rc;
10150}
10151
10152
10153#ifdef IEM_WITH_SETJMP
10154/**
10155 * Stores a data dqword, SSE aligned, longjmp on error.
10156 *
10158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10159 * @param iSegReg The index of the segment register to use for
10160 * this access. The base and limits are checked.
10161 * @param GCPtrMem The address of the guest memory.
10162 * @param u128Value The value to store.
10163 */
10164DECL_NO_INLINE(IEM_STATIC, void)
10165iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10166{
10167 /* The lazy approach for now... */
10168 if ( (GCPtrMem & 15) == 0
10169 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10170 {
10171 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10172 pu128Dst->au64[0] = u128Value.au64[0];
10173 pu128Dst->au64[1] = u128Value.au64[1];
10174 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10175 return;
10176 }
10177
10178 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10179 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10180}
10181#endif
10182
10183
10184/**
10185 * Stores a descriptor register (sgdt, sidt).
10186 *
10187 * @returns Strict VBox status code.
10188 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10189 * @param cbLimit The limit.
10190 * @param GCPtrBase The base address.
10191 * @param iSegReg The index of the segment register to use for
10192 * this access. The base and limits are checked.
10193 * @param GCPtrMem The address of the guest memory.
10194 */
10195IEM_STATIC VBOXSTRICTRC
10196iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10197{
10198 VBOXSTRICTRC rcStrict;
10199 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
10200 {
10201 Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n"));
10202 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
10203 }
10204
10205 /*
10206     * The SIDT and SGDT instructions actually store the data using two
10207     * independent writes.  The instructions do not respond to opsize prefixes.
10208 */
10209 rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10210 if (rcStrict == VINF_SUCCESS)
10211 {
10212 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10213 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10214 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10215 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10216 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10217 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10218 else
10219 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10220 }
10221 return rcStrict;
10222}
10223
10224
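/*
 * A usage sketch (hypothetical caller, not part of the original source,
 * excluded from the build): an sgdt-style use of iemMemStoreDataXdtr().  Only
 * the call shape is shown; the GDTR values come straight from the guest
 * context, and the helper itself picks the base width (and the masking on
 * ancient CPUs) as described above.
 */
#if 0 /* illustration only, not built */
IEM_STATIC VBOXSTRICTRC iemExampleStoreGdtr(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    /* Limit word at +0, base at +2. */
    return iemMemStoreDataXdtr(pVCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iSegReg, GCPtrMem);
}
#endif
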
10225/**
10226 * Pushes a word onto the stack.
10227 *
10228 * @returns Strict VBox status code.
10229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10230 * @param u16Value The value to push.
10231 */
10232IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10233{
10234 /* Increment the stack pointer. */
10235 uint64_t uNewRsp;
10236 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10237 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10238
10239 /* Write the word the lazy way. */
10240 uint16_t *pu16Dst;
10241 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10242 if (rc == VINF_SUCCESS)
10243 {
10244 *pu16Dst = u16Value;
10245 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10246 }
10247
10248   /* Commit the new RSP value unless an access handler made trouble. */
10249 if (rc == VINF_SUCCESS)
10250 pCtx->rsp = uNewRsp;
10251
10252 return rc;
10253}
10254
10255
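/*
 * A usage sketch (hypothetical caller, not part of the original source,
 * excluded from the build): a far-call style sequence built from the push
 * helpers.  Each push is checked before the next one; a faulting push does not
 * commit RSP, so the stack pointer only advances for pushes that succeeded.
 */
#if 0 /* illustration only, not built */
IEM_STATIC VBOXSTRICTRC iemExamplePushCsIp(PVMCPU pVCpu, uint16_t uSelCs, uint16_t uIp)
{
    VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uSelCs);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPushU16(pVCpu, uIp);
    return rcStrict;
}
#endif
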
10256/**
10257 * Pushes a dword onto the stack.
10258 *
10259 * @returns Strict VBox status code.
10260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10261 * @param u32Value The value to push.
10262 */
10263IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10264{
10265 /* Increment the stack pointer. */
10266 uint64_t uNewRsp;
10267 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10268 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10269
10270 /* Write the dword the lazy way. */
10271 uint32_t *pu32Dst;
10272 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10273 if (rc == VINF_SUCCESS)
10274 {
10275 *pu32Dst = u32Value;
10276 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10277 }
10278
10279   /* Commit the new RSP value unless an access handler made trouble. */
10280 if (rc == VINF_SUCCESS)
10281 pCtx->rsp = uNewRsp;
10282
10283 return rc;
10284}
10285
10286
10287/**
10288 * Pushes a dword segment register value onto the stack.
10289 *
10290 * @returns Strict VBox status code.
10291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10292 * @param u32Value The value to push.
10293 */
10294IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10295{
10296 /* Increment the stack pointer. */
10297 uint64_t uNewRsp;
10298 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10299 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10300
10301 VBOXSTRICTRC rc;
10302 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10303 {
10304 /* The recompiler writes a full dword. */
10305 uint32_t *pu32Dst;
10306 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10307 if (rc == VINF_SUCCESS)
10308 {
10309 *pu32Dst = u32Value;
10310 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10311 }
10312 }
10313 else
10314 {
10315      /* The Intel docs talk about zero extending the selector register
10316 value. My actual intel CPU here might be zero extending the value
10317 but it still only writes the lower word... */
10318 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10319       * happens when crossing an electric page boundary, is the high word checked
10320 * for write accessibility or not? Probably it is. What about segment limits?
10321 * It appears this behavior is also shared with trap error codes.
10322 *
10323 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10324 * ancient hardware when it actually did change. */
10325 uint16_t *pu16Dst;
10326 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10327 if (rc == VINF_SUCCESS)
10328 {
10329 *pu16Dst = (uint16_t)u32Value;
10330 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10331 }
10332 }
10333
10334   /* Commit the new RSP value unless an access handler made trouble. */
10335 if (rc == VINF_SUCCESS)
10336 pCtx->rsp = uNewRsp;
10337
10338 return rc;
10339}
10340
10341
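/*
 * A worked example (hypothetical values, based only on the observation in the
 * comment above, excluded from the build): what the non-recompiler path leaves
 * on the stack for a 32-bit operand size segment register push.
 *
 *   Before:  ESP = 0x1000, dword at 0x0FFC = 0xAABBCCDD, ES = 0x0023.
 *   After "push es":
 *     ESP            = 0x0FFC    (a full dword slot is reserved)
 *     word at 0x0FFC = 0x0023    (only the low word is written)
 *     word at 0x0FFE = 0xAABB    (upper half of the slot is left untouched)
 *
 *   The IEM_FULL_VERIFICATION_REM_ENABLED path above instead writes the whole
 *   dword u32Value into the slot.
 */
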
10342/**
10343 * Pushes a qword onto the stack.
10344 *
10345 * @returns Strict VBox status code.
10346 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10347 * @param u64Value The value to push.
10348 */
10349IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10350{
10351 /* Increment the stack pointer. */
10352 uint64_t uNewRsp;
10353 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10354 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10355
10356   /* Write the qword the lazy way. */
10357 uint64_t *pu64Dst;
10358 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10359 if (rc == VINF_SUCCESS)
10360 {
10361 *pu64Dst = u64Value;
10362 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10363 }
10364
10365   /* Commit the new RSP value unless an access handler made trouble. */
10366 if (rc == VINF_SUCCESS)
10367 pCtx->rsp = uNewRsp;
10368
10369 return rc;
10370}
10371
10372
10373/**
10374 * Pops a word from the stack.
10375 *
10376 * @returns Strict VBox status code.
10377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10378 * @param pu16Value Where to store the popped value.
10379 */
10380IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10381{
10382 /* Increment the stack pointer. */
10383 uint64_t uNewRsp;
10384 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10385 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10386
10387   /* Read the word the lazy way. */
10388 uint16_t const *pu16Src;
10389 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10390 if (rc == VINF_SUCCESS)
10391 {
10392 *pu16Value = *pu16Src;
10393 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10394
10395 /* Commit the new RSP value. */
10396 if (rc == VINF_SUCCESS)
10397 pCtx->rsp = uNewRsp;
10398 }
10399
10400 return rc;
10401}
10402
10403
10404/**
10405 * Pops a dword from the stack.
10406 *
10407 * @returns Strict VBox status code.
10408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10409 * @param pu32Value Where to store the popped value.
10410 */
10411IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10412{
10413 /* Increment the stack pointer. */
10414 uint64_t uNewRsp;
10415 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10416 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10417
10418   /* Read the dword the lazy way. */
10419 uint32_t const *pu32Src;
10420 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10421 if (rc == VINF_SUCCESS)
10422 {
10423 *pu32Value = *pu32Src;
10424 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10425
10426 /* Commit the new RSP value. */
10427 if (rc == VINF_SUCCESS)
10428 pCtx->rsp = uNewRsp;
10429 }
10430
10431 return rc;
10432}
10433
10434
10435/**
10436 * Pops a qword from the stack.
10437 *
10438 * @returns Strict VBox status code.
10439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10440 * @param pu64Value Where to store the popped value.
10441 */
10442IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10443{
10444 /* Increment the stack pointer. */
10445 uint64_t uNewRsp;
10446 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10447 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10448
10449   /* Read the qword the lazy way. */
10450 uint64_t const *pu64Src;
10451 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10452 if (rc == VINF_SUCCESS)
10453 {
10454 *pu64Value = *pu64Src;
10455 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10456
10457 /* Commit the new RSP value. */
10458 if (rc == VINF_SUCCESS)
10459 pCtx->rsp = uNewRsp;
10460 }
10461
10462 return rc;
10463}
10464
10465
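/*
 * A usage sketch (hypothetical caller, not part of the original source,
 * excluded from the build): popping a dword the status-code way.  The helper
 * only commits the new RSP after the read and the unmap have succeeded, so a
 * faulting pop leaves the stack pointer unchanged.
 */
#if 0 /* illustration only, not built */
IEM_STATIC VBOXSTRICTRC iemExamplePopDword(PVMCPU pVCpu, uint32_t *pu32Dst)
{
    uint32_t u32Value = 0;
    VBOXSTRICTRC rcStrict = iemMemStackPopU32(pVCpu, &u32Value);
    if (rcStrict == VINF_SUCCESS)
        *pu32Dst = u32Value; /* e.g. written back to a general register by the real caller */
    return rcStrict;
}
#endif
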
10466/**
10467 * Pushes a word onto the stack, using a temporary stack pointer.
10468 *
10469 * @returns Strict VBox status code.
10470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10471 * @param u16Value The value to push.
10472 * @param pTmpRsp Pointer to the temporary stack pointer.
10473 */
10474IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10475{
10476 /* Increment the stack pointer. */
10477 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10478 RTUINT64U NewRsp = *pTmpRsp;
10479 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10480
10481 /* Write the word the lazy way. */
10482 uint16_t *pu16Dst;
10483 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10484 if (rc == VINF_SUCCESS)
10485 {
10486 *pu16Dst = u16Value;
10487 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10488 }
10489
10490   /* Commit the new RSP value unless an access handler made trouble. */
10491 if (rc == VINF_SUCCESS)
10492 *pTmpRsp = NewRsp;
10493
10494 return rc;
10495}
10496
10497
10498/**
10499 * Pushes a dword onto the stack, using a temporary stack pointer.
10500 *
10501 * @returns Strict VBox status code.
10502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10503 * @param u32Value The value to push.
10504 * @param pTmpRsp Pointer to the temporary stack pointer.
10505 */
10506IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10507{
10508 /* Increment the stack pointer. */
10509 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10510 RTUINT64U NewRsp = *pTmpRsp;
10511 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10512
10513   /* Write the dword the lazy way. */
10514 uint32_t *pu32Dst;
10515 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10516 if (rc == VINF_SUCCESS)
10517 {
10518 *pu32Dst = u32Value;
10519 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10520 }
10521
10522   /* Commit the new RSP value unless an access handler made trouble. */
10523 if (rc == VINF_SUCCESS)
10524 *pTmpRsp = NewRsp;
10525
10526 return rc;
10527}
10528
10529
10530/**
10531 * Pushes a qword onto the stack, using a temporary stack pointer.
10532 *
10533 * @returns Strict VBox status code.
10534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10535 * @param u64Value The value to push.
10536 * @param pTmpRsp Pointer to the temporary stack pointer.
10537 */
10538IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10539{
10540 /* Increment the stack pointer. */
10541 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10542 RTUINT64U NewRsp = *pTmpRsp;
10543 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10544
10545   /* Write the qword the lazy way. */
10546 uint64_t *pu64Dst;
10547 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10548 if (rc == VINF_SUCCESS)
10549 {
10550 *pu64Dst = u64Value;
10551 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10552 }
10553
10554   /* Commit the new RSP value unless an access handler made trouble. */
10555 if (rc == VINF_SUCCESS)
10556 *pTmpRsp = NewRsp;
10557
10558 return rc;
10559}
10560
10561
10562/**
10563 * Pops a word from the stack, using a temporary stack pointer.
10564 *
10565 * @returns Strict VBox status code.
10566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10567 * @param pu16Value Where to store the popped value.
10568 * @param pTmpRsp Pointer to the temporary stack pointer.
10569 */
10570IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10571{
10572 /* Increment the stack pointer. */
10573 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10574 RTUINT64U NewRsp = *pTmpRsp;
10575 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10576
10577   /* Read the word the lazy way. */
10578 uint16_t const *pu16Src;
10579 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10580 if (rc == VINF_SUCCESS)
10581 {
10582 *pu16Value = *pu16Src;
10583 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10584
10585 /* Commit the new RSP value. */
10586 if (rc == VINF_SUCCESS)
10587 *pTmpRsp = NewRsp;
10588 }
10589
10590 return rc;
10591}
10592
10593
10594/**
10595 * Pops a dword from the stack, using a temporary stack pointer.
10596 *
10597 * @returns Strict VBox status code.
10598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10599 * @param pu32Value Where to store the popped value.
10600 * @param pTmpRsp Pointer to the temporary stack pointer.
10601 */
10602IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10603{
10604 /* Increment the stack pointer. */
10605 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10606 RTUINT64U NewRsp = *pTmpRsp;
10607 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10608
10609   /* Read the dword the lazy way. */
10610 uint32_t const *pu32Src;
10611 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10612 if (rc == VINF_SUCCESS)
10613 {
10614 *pu32Value = *pu32Src;
10615 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10616
10617 /* Commit the new RSP value. */
10618 if (rc == VINF_SUCCESS)
10619 *pTmpRsp = NewRsp;
10620 }
10621
10622 return rc;
10623}
10624
10625
10626/**
10627 * Pops a qword from the stack, using a temporary stack pointer.
10628 *
10629 * @returns Strict VBox status code.
10630 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10631 * @param pu64Value Where to store the popped value.
10632 * @param pTmpRsp Pointer to the temporary stack pointer.
10633 */
10634IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10635{
10636 /* Increment the stack pointer. */
10637 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10638 RTUINT64U NewRsp = *pTmpRsp;
10639 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10640
10641   /* Read the qword the lazy way. */
10642 uint64_t const *pu64Src;
10643 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10644 if (rcStrict == VINF_SUCCESS)
10645 {
10646 *pu64Value = *pu64Src;
10647 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10648
10649 /* Commit the new RSP value. */
10650 if (rcStrict == VINF_SUCCESS)
10651 *pTmpRsp = NewRsp;
10652 }
10653
10654 return rcStrict;
10655}
10656
10657
10658/**
10659 * Begin a special stack push (used by interrupt, exceptions and such).
10660 *
10661 * This will raise \#SS or \#PF if appropriate.
10662 *
10663 * @returns Strict VBox status code.
10664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10665 * @param cbMem The number of bytes to push onto the stack.
10666 * @param ppvMem Where to return the pointer to the stack memory.
10667 * As with the other memory functions this could be
10668 * direct access or bounce buffered access, so
10669 * don't commit the register until the commit call
10670 * succeeds.
10671 * @param puNewRsp Where to return the new RSP value. This must be
10672 * passed unchanged to
10673 * iemMemStackPushCommitSpecial().
10674 */
10675IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10676{
10677 Assert(cbMem < UINT8_MAX);
10678 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10679 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10680 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10681}
10682
10683
10684/**
10685 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10686 *
10687 * This will update the rSP.
10688 *
10689 * @returns Strict VBox status code.
10690 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10691 * @param pvMem The pointer returned by
10692 * iemMemStackPushBeginSpecial().
10693 * @param uNewRsp The new RSP value returned by
10694 * iemMemStackPushBeginSpecial().
10695 */
10696IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10697{
10698 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10699 if (rcStrict == VINF_SUCCESS)
10700 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10701 return rcStrict;
10702}
10703
10704
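/*
 * A usage sketch (hypothetical frame, not part of the original source, excluded
 * from the build): the begin/commit pairing used when building an exception or
 * interrupt stack frame.  The whole frame is mapped as one block, filled in
 * place, and then committed together with the new RSP.  The three-word layout
 * below is only an illustration, not a specific architectural frame format.
 */
#if 0 /* illustration only, not built */
IEM_STATIC VBOXSTRICTRC iemExamplePushThreeWords(PVMCPU pVCpu, uint16_t uFlags, uint16_t uSelCs, uint16_t uIp)
{
    void    *pvFrame = NULL;
    uint64_t uNewRsp = 0;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6 /*cbMem*/, &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint16_t *pau16Frame = (uint16_t *)pvFrame;
    pau16Frame[0] = uIp;    /* lowest address, i.e. the last thing "pushed" */
    pau16Frame[1] = uSelCs;
    pau16Frame[2] = uFlags;

    /* Commits the mapped memory and, on success, the new RSP. */
    return iemMemStackPushCommitSpecial(pVCpu, pvFrame, uNewRsp);
}
#endif
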
10705/**
10706 * Begin a special stack pop (used by iret, retf and such).
10707 *
10708 * This will raise \#SS or \#PF if appropriate.
10709 *
10710 * @returns Strict VBox status code.
10711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10712 * @param cbMem The number of bytes to pop from the stack.
10713 * @param ppvMem Where to return the pointer to the stack memory.
10714 * @param puNewRsp Where to return the new RSP value. This must be
10715 * assigned to CPUMCTX::rsp manually some time
10716 * after iemMemStackPopDoneSpecial() has been
10717 * called.
10718 */
10719IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10720{
10721 Assert(cbMem < UINT8_MAX);
10722 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10723 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10724 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10725}
10726
10727
10728/**
10729 * Continue a special stack pop (used by iret and retf).
10730 *
10731 * This will raise \#SS or \#PF if appropriate.
10732 *
10733 * @returns Strict VBox status code.
10734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10735 * @param cbMem The number of bytes to pop from the stack.
10736 * @param ppvMem Where to return the pointer to the stack memory.
10737 * @param puNewRsp Where to return the new RSP value. This must be
10738 * assigned to CPUMCTX::rsp manually some time
10739 * after iemMemStackPopDoneSpecial() has been
10740 * called.
10741 */
10742IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10743{
10744 Assert(cbMem < UINT8_MAX);
10745 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10746 RTUINT64U NewRsp;
10747 NewRsp.u = *puNewRsp;
10748 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10749 *puNewRsp = NewRsp.u;
10750 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10751}
10752
10753
10754/**
10755 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10756 * iemMemStackPopContinueSpecial).
10757 *
10758 * The caller will manually commit the rSP.
10759 *
10760 * @returns Strict VBox status code.
10761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10762 * @param pvMem The pointer returned by
10763 * iemMemStackPopBeginSpecial() or
10764 * iemMemStackPopContinueSpecial().
10765 */
10766IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10767{
10768 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10769}
10770
10771
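/*
 * A usage sketch (hypothetical caller, not part of the original source,
 * excluded from the build): the begin/done pairing used by iret/retf style
 * code.  The popped data is read through the mapping, and RSP is assigned
 * manually from *puNewRsp only after iemMemStackPopDoneSpecial() has
 * succeeded, as the doc comments above require.  The two-dword layout is
 * illustrative.
 */
#if 0 /* illustration only, not built */
IEM_STATIC VBOXSTRICTRC iemExamplePopTwoDwords(PVMCPU pVCpu, uint32_t *puEip, uint32_t *puSelCs)
{
    void const *pvFrame = NULL;
    uint64_t    uNewRsp = 0;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8 /*cbMem*/, &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint32_t const *pau32Frame = (uint32_t const *)pvFrame;
    *puEip   = pau32Frame[0];
    *puSelCs = pau32Frame[1];

    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
    if (rcStrict == VINF_SUCCESS)
        IEM_GET_CTX(pVCpu)->rsp = uNewRsp; /* the manual RSP commit mentioned above */
    return rcStrict;
}
#endif
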
10772/**
10773 * Fetches a system table byte.
10774 *
10775 * @returns Strict VBox status code.
10776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10777 * @param pbDst Where to return the byte.
10778 * @param iSegReg The index of the segment register to use for
10779 * this access. The base and limits are checked.
10780 * @param GCPtrMem The address of the guest memory.
10781 */
10782IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10783{
10784 /* The lazy approach for now... */
10785 uint8_t const *pbSrc;
10786 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10787 if (rc == VINF_SUCCESS)
10788 {
10789 *pbDst = *pbSrc;
10790 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10791 }
10792 return rc;
10793}
10794
10795
10796/**
10797 * Fetches a system table word.
10798 *
10799 * @returns Strict VBox status code.
10800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10801 * @param pu16Dst Where to return the word.
10802 * @param iSegReg The index of the segment register to use for
10803 * this access. The base and limits are checked.
10804 * @param GCPtrMem The address of the guest memory.
10805 */
10806IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10807{
10808 /* The lazy approach for now... */
10809 uint16_t const *pu16Src;
10810 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10811 if (rc == VINF_SUCCESS)
10812 {
10813 *pu16Dst = *pu16Src;
10814 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10815 }
10816 return rc;
10817}
10818
10819
10820/**
10821 * Fetches a system table dword.
10822 *
10823 * @returns Strict VBox status code.
10824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10825 * @param pu32Dst Where to return the dword.
10826 * @param iSegReg The index of the segment register to use for
10827 * this access. The base and limits are checked.
10828 * @param GCPtrMem The address of the guest memory.
10829 */
10830IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10831{
10832 /* The lazy approach for now... */
10833 uint32_t const *pu32Src;
10834 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10835 if (rc == VINF_SUCCESS)
10836 {
10837 *pu32Dst = *pu32Src;
10838 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10839 }
10840 return rc;
10841}
10842
10843
10844/**
10845 * Fetches a system table qword.
10846 *
10847 * @returns Strict VBox status code.
10848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10849 * @param pu64Dst Where to return the qword.
10850 * @param iSegReg The index of the segment register to use for
10851 * this access. The base and limits are checked.
10852 * @param GCPtrMem The address of the guest memory.
10853 */
10854IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10855{
10856 /* The lazy approach for now... */
10857 uint64_t const *pu64Src;
10858 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10859 if (rc == VINF_SUCCESS)
10860 {
10861 *pu64Dst = *pu64Src;
10862 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10863 }
10864 return rc;
10865}
10866
10867
10868/**
10869 * Fetches a descriptor table entry with caller specified error code.
10870 *
10871 * @returns Strict VBox status code.
10872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10873 * @param pDesc Where to return the descriptor table entry.
10874 * @param uSel The selector which table entry to fetch.
10875 * @param uXcpt The exception to raise on table lookup error.
10876 * @param uErrorCode The error code associated with the exception.
10877 */
10878IEM_STATIC VBOXSTRICTRC
10879iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10880{
10881 AssertPtr(pDesc);
10882 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10883
10884 /** @todo did the 286 require all 8 bytes to be accessible? */
10885 /*
10886 * Get the selector table base and check bounds.
10887 */
10888 RTGCPTR GCPtrBase;
10889 if (uSel & X86_SEL_LDT)
10890 {
10891 if ( !pCtx->ldtr.Attr.n.u1Present
10892 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10893 {
10894 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10895 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10896 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10897 uErrorCode, 0);
10898 }
10899
10900 Assert(pCtx->ldtr.Attr.n.u1Present);
10901 GCPtrBase = pCtx->ldtr.u64Base;
10902 }
10903 else
10904 {
10905 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10906 {
10907 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10908 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10909 uErrorCode, 0);
10910 }
10911 GCPtrBase = pCtx->gdtr.pGdt;
10912 }
10913
10914 /*
10915 * Read the legacy descriptor and maybe the long mode extensions if
10916 * required.
10917 */
10918 VBOXSTRICTRC rcStrict;
10919 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10920 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10921 else
10922 {
10923 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10924 if (rcStrict == VINF_SUCCESS)
10925 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10926 if (rcStrict == VINF_SUCCESS)
10927 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10928 if (rcStrict == VINF_SUCCESS)
10929 pDesc->Legacy.au16[3] = 0;
10930 else
10931 return rcStrict;
10932 }
10933
10934 if (rcStrict == VINF_SUCCESS)
10935 {
10936 if ( !IEM_IS_LONG_MODE(pVCpu)
10937 || pDesc->Legacy.Gen.u1DescType)
10938 pDesc->Long.au64[1] = 0;
10939 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10940 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10941 else
10942 {
10943 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10944 /** @todo is this the right exception? */
10945 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10946 }
10947 }
10948 return rcStrict;
10949}
10950
10951
10952/**
10953 * Fetches a descriptor table entry.
10954 *
10955 * @returns Strict VBox status code.
10956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10957 * @param pDesc Where to return the descriptor table entry.
10958 * @param uSel The selector which table entry to fetch.
10959 * @param uXcpt The exception to raise on table lookup error.
10960 */
10961IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10962{
10963 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10964}
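
/* Illustrative usage sketch (editorial addition, not part of the original source): a
 * typical caller fetches the descriptor, validates the fields it cares about and, for
 * code/data selectors, marks the descriptor accessed before loading it. The selector
 * value (uNewSel) and the elided validity checks are placeholders for this example.
 *
 * @code
 *      IEMSELDESC Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ... check Desc.Legacy.Gen.u1Present, u1DescType, u2Dpl, u4Type, ...
 *      if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *      {
 *          rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSel);
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;
 *      }
 * @endcode
 */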
10965
10966
10967/**
10968 * Fakes a long mode stack selector for SS = 0.
10969 *
10970 * @param pDescSs Where to return the fake stack descriptor.
10971 * @param uDpl The DPL we want.
10972 */
10973IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10974{
10975 pDescSs->Long.au64[0] = 0;
10976 pDescSs->Long.au64[1] = 0;
10977 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10978 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10979 pDescSs->Long.Gen.u2Dpl = uDpl;
10980 pDescSs->Long.Gen.u1Present = 1;
10981 pDescSs->Long.Gen.u1Long = 1;
10982}
10983
10984
10985/**
10986 * Marks the selector descriptor as accessed (only non-system descriptors).
10987 *
10988 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10989 * will therefore skip the limit checks.
10990 *
10991 * @returns Strict VBox status code.
10992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10993 * @param uSel The selector.
10994 */
10995IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10996{
10997 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10998
10999 /*
11000 * Get the selector table base and calculate the entry address.
11001 */
11002 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11003 ? pCtx->ldtr.u64Base
11004 : pCtx->gdtr.pGdt;
11005 GCPtr += uSel & X86_SEL_MASK;
11006
11007 /*
11008 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11009 * ugly stuff to avoid this. This will make sure the access is atomic and
11010 * more or less removes any question about 8-bit or 32-bit accesses.
11011 */
11012 VBOXSTRICTRC rcStrict;
11013 uint32_t volatile *pu32;
11014 if ((GCPtr & 3) == 0)
11015 {
11016 /* The normal case, map the 32 bits around the accessed bit (40). */
11017 GCPtr += 2 + 2;
11018 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11019 if (rcStrict != VINF_SUCCESS)
11020 return rcStrict;
11021 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11022 }
11023 else
11024 {
11025 /* The misaligned GDT/LDT case, map the whole thing. */
11026 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11027 if (rcStrict != VINF_SUCCESS)
11028 return rcStrict;
11029 switch ((uintptr_t)pu32 & 3)
11030 {
11031 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11032 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11033 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11034 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11035 }
11036 }
11037
11038 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11039}
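
/* Worked note on the bit offsets above (editorial addition, not from the original
 * source): the accessed bit is bit 40 of the 8-byte descriptor, i.e. bit 0 of byte 5.
 * In the aligned case the mapping starts at descriptor offset 4, so the bit lands at
 * bit 8 of that dword (byte 1, bit 0). In the misaligned case the whole 8 bytes are
 * mapped and the byte pointer is advanced by 3, 2 or 1 bytes to reach a 4-byte
 * boundary, so the bit index becomes 40 - 24, 40 - 16 or 40 - 8 for
 * ((uintptr_t)pu32 & 3) == 1, 2 or 3 respectively.
 */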
11040
11041/** @} */
11042
11043
11044/*
11045 * Include the C/C++ implementation of instruction.
11046 */
11047#include "IEMAllCImpl.cpp.h"
11048
11049
11050
11051/** @name "Microcode" macros.
11052 *
11053 * The idea is that we should be able to use the same code to interpret
11054 * instructions as well as recompiler instructions. Thus this obfuscation.
11055 *
11056 * @{
11057 */
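
/* Illustrative sketch (editorial addition, not from the original source) of how an
 * instruction body is written in terms of these IEM_MC_* macros; in the interpreter
 * they expand to the plain C seen further down. The worker (iemAImpl_add_u32) and the
 * decoded register indices (iGRegDst, iGRegSrc) are assumptions made for the example.
 *
 * @code
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_ARG(uint32_t *, pu32Dst, 0);
 *      IEM_MC_ARG(uint32_t,   u32Src,  1);
 *      IEM_MC_ARG(uint32_t *, pEFlags, 2);
 *      IEM_MC_FETCH_GREG_U32(u32Src, iGRegSrc);
 *      IEM_MC_REF_GREG_U32(pu32Dst, iGRegDst);
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *      IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */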
11058#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11059#define IEM_MC_END() }
11060#define IEM_MC_PAUSE() do {} while (0)
11061#define IEM_MC_CONTINUE() do {} while (0)
11062
11063/** Internal macro. */
11064#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11065 do \
11066 { \
11067 VBOXSTRICTRC rcStrict2 = a_Expr; \
11068 if (rcStrict2 != VINF_SUCCESS) \
11069 return rcStrict2; \
11070 } while (0)
11071
11072
11073#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11074#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11075#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11076#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11077#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11078#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11079#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11080#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11081#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11082 do { \
11083 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11084 return iemRaiseDeviceNotAvailable(pVCpu); \
11085 } while (0)
11086#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11087 do { \
11088 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11089 return iemRaiseDeviceNotAvailable(pVCpu); \
11090 } while (0)
11091#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11092 do { \
11093 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11094 return iemRaiseMathFault(pVCpu); \
11095 } while (0)
11096#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11097 do { \
11098 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11099 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11100 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11101 return iemRaiseUndefinedOpcode(pVCpu); \
11102 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11103 return iemRaiseDeviceNotAvailable(pVCpu); \
11104 } while (0)
11105#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11106 do { \
11107 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11108 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11109 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11110 return iemRaiseUndefinedOpcode(pVCpu); \
11111 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11112 return iemRaiseDeviceNotAvailable(pVCpu); \
11113 } while (0)
11114#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11115 do { \
11116 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11117 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11118 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11119 return iemRaiseUndefinedOpcode(pVCpu); \
11120 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11121 return iemRaiseDeviceNotAvailable(pVCpu); \
11122 } while (0)
11123#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11124 do { \
11125 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11126 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11127 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11128 return iemRaiseUndefinedOpcode(pVCpu); \
11129 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11130 return iemRaiseDeviceNotAvailable(pVCpu); \
11131 } while (0)
11132#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11133 do { \
11134 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11135 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11136 return iemRaiseUndefinedOpcode(pVCpu); \
11137 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11138 return iemRaiseDeviceNotAvailable(pVCpu); \
11139 } while (0)
11140#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11141 do { \
11142 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11143 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11144 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11145 return iemRaiseUndefinedOpcode(pVCpu); \
11146 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11147 return iemRaiseDeviceNotAvailable(pVCpu); \
11148 } while (0)
11149#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11150 do { \
11151 if (pVCpu->iem.s.uCpl != 0) \
11152 return iemRaiseGeneralProtectionFault0(pVCpu); \
11153 } while (0)
11154#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11155 do { \
11156 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11157 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11158 } while (0)
11159
11160
11161#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11162#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11163#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11164#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11165#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11166#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11167#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11168 uint32_t a_Name; \
11169 uint32_t *a_pName = &a_Name
11170#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11171 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
11172
11173#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11174#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11175
11176#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11177#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11178#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11179#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11180#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11181#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11182#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11183#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11184#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11185#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11186#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11187#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11188#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11189#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11190#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11191#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11192#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11193#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11194#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11195#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11196#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11197#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11198#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11199#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11200#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11201#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11202#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11203#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11204#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11205/** @note Not for IOPL or IF testing or modification. */
11206#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11207#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11208#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11209#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11210
11211#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11212#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11213#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11214#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11215#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11216#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11217#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11218#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11219#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11220#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11221#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11222 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11223
11224#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11225#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11226/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11227 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11228#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11229#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11230/** @note Not for IOPL or IF testing or modification. */
11231#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11232
11233#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11234#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11235#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11236 do { \
11237 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11238 *pu32Reg += (a_u32Value); \
11239 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11240 } while (0)
11241#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11242
11243#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11244#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11245#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11246 do { \
11247 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11248 *pu32Reg -= (a_u32Value); \
11249 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11250 } while (0)
11251#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11252#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11253
11254#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11255#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11256#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11257#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11258#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11259#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11260#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11261
11262#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11263#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11264#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11265#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11266
11267#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11268#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11269#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11270
11271#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11272#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11273#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11274
11275#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11276#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11277#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11278
11279#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11280#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11281#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11282
11283#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11284
11285#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11286
11287#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11288#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11289#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11290 do { \
11291 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11292 *pu32Reg &= (a_u32Value); \
11293 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11294 } while (0)
11295#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11296
11297#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11298#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11299#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11300 do { \
11301 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11302 *pu32Reg |= (a_u32Value); \
11303 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11304 } while (0)
11305#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11306
11307
11308/** @note Not for IOPL or IF modification. */
11309#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11310/** @note Not for IOPL or IF modification. */
11311#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11312/** @note Not for IOPL or IF modification. */
11313#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11314
11315#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11316
11317/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11318#define IEM_MC_FPU_TO_MMX_MODE() do { \
11319 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11320 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11321 } while (0)
11322
11323#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11324 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11325#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11326 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11327#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11328 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11329 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11330 } while (0)
11331#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11332 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11333 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11334 } while (0)
11335#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11336 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11337#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11338 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11339#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11340 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11341
11342#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11343 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11344 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11345 } while (0)
11346#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11347 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11348#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11349 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11350#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11351 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11352#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11353 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11354 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11355 } while (0)
11356#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11357 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11358#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11359 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11360 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11361 } while (0)
11362#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11363 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11364#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11365 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11366 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11367 } while (0)
11368#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11369 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11370#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11371 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11372#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11373 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11374#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11375 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11376#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11377 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11378 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11379 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11380 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11381 } while (0)
11382
11383#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11384#define IEM_MC_STORE_YREG_U128_ZX(a_iYRegDst, a_u128Src) \
11385 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11386 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11387 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11388 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11389 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11390 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11391 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
11392 } while (0)
11393#define IEM_MC_STORE_YREG_U256_ZX(a_iYRegDst, a_u256Src) \
11394 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11395 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11396 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11397 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11398 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11399 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11400 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
11401 } while (0)
11402#define IEM_MC_COPY_YREG_U256_ZX(a_iYRegDst, a_iYRegSrc) \
11403 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11404 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11405 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11406 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11407 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11408 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11409 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11410 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
11411 } while (0)
11412#define IEM_MC_COPY_YREG_U128_ZX(a_iYRegDst, a_iYRegSrc) \
11413 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11414 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11415 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11416 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11417 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11418 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11419 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11420 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
11421 } while (0)
11422
11423#ifndef IEM_WITH_SETJMP
11424# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11425 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11426# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11427 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11428# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11429 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11430#else
11431# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11432 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11433# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11434 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11435# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11436 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11437#endif
11438
11439#ifndef IEM_WITH_SETJMP
11440# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11441 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11442# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11443 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11444# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11445 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11446#else
11447# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11448 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11449# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11450 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11451# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11452 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11453#endif
11454
11455#ifndef IEM_WITH_SETJMP
11456# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11457 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11458# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11459 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11460# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11461 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11462#else
11463# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11464 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11465# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11466 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11467# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11468 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11469#endif
11470
11471#ifdef SOME_UNUSED_FUNCTION
11472# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11473 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11474#endif
11475
11476#ifndef IEM_WITH_SETJMP
11477# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11478 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11479# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11480 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11481# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11482 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11483# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11484 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11485#else
11486# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11487 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11488# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11489 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11490# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11491 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11492# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11493 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11494#endif
11495
11496#ifndef IEM_WITH_SETJMP
11497# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11498 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11499# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11500 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11501# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11502 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11503#else
11504# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11505 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11506# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11507 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11508# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11509 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11510#endif
11511
11512#ifndef IEM_WITH_SETJMP
11513# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11514 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11515# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11516 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11517#else
11518# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11519 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11520# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11521 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11522#endif
11523
11524#ifndef IEM_WITH_SETJMP
11525# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11526 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11527# define IEM_MC_FETCH_MEM_U256_ALIGN_SSE(a_u256Dst, a_iSeg, a_GCPtrMem) \
11528 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11529#else
11530# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11531 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11532# define IEM_MC_FETCH_MEM_U256_ALIGN_SSE(a_u256Dst, a_iSeg, a_GCPtrMem) \
11533 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11534#endif
11535
11536
11537
11538#ifndef IEM_WITH_SETJMP
11539# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11540 do { \
11541 uint8_t u8Tmp; \
11542 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11543 (a_u16Dst) = u8Tmp; \
11544 } while (0)
11545# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11546 do { \
11547 uint8_t u8Tmp; \
11548 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11549 (a_u32Dst) = u8Tmp; \
11550 } while (0)
11551# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11552 do { \
11553 uint8_t u8Tmp; \
11554 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11555 (a_u64Dst) = u8Tmp; \
11556 } while (0)
11557# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11558 do { \
11559 uint16_t u16Tmp; \
11560 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11561 (a_u32Dst) = u16Tmp; \
11562 } while (0)
11563# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11564 do { \
11565 uint16_t u16Tmp; \
11566 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11567 (a_u64Dst) = u16Tmp; \
11568 } while (0)
11569# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11570 do { \
11571 uint32_t u32Tmp; \
11572 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11573 (a_u64Dst) = u32Tmp; \
11574 } while (0)
11575#else /* IEM_WITH_SETJMP */
11576# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11577 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11578# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11579 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11580# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11581 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11582# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11583 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11584# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11585 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11586# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11587 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11588#endif /* IEM_WITH_SETJMP */
11589
11590#ifndef IEM_WITH_SETJMP
11591# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11592 do { \
11593 uint8_t u8Tmp; \
11594 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11595 (a_u16Dst) = (int8_t)u8Tmp; \
11596 } while (0)
11597# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11598 do { \
11599 uint8_t u8Tmp; \
11600 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11601 (a_u32Dst) = (int8_t)u8Tmp; \
11602 } while (0)
11603# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11604 do { \
11605 uint8_t u8Tmp; \
11606 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11607 (a_u64Dst) = (int8_t)u8Tmp; \
11608 } while (0)
11609# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11610 do { \
11611 uint16_t u16Tmp; \
11612 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11613 (a_u32Dst) = (int16_t)u16Tmp; \
11614 } while (0)
11615# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11616 do { \
11617 uint16_t u16Tmp; \
11618 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11619 (a_u64Dst) = (int16_t)u16Tmp; \
11620 } while (0)
11621# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11622 do { \
11623 uint32_t u32Tmp; \
11624 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11625 (a_u64Dst) = (int32_t)u32Tmp; \
11626 } while (0)
11627#else /* IEM_WITH_SETJMP */
11628# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11629 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11630# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11631 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11632# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11633 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11634# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11635 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11636# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11637 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11638# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11639 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11640#endif /* IEM_WITH_SETJMP */
11641
11642#ifndef IEM_WITH_SETJMP
11643# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11644 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11645# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11646 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11647# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11648 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11649# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11650 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11651#else
11652# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11653 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11654# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11655 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11656# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11657 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11658# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11659 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11660#endif
11661
11662#ifndef IEM_WITH_SETJMP
11663# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11664 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11665# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11666 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11667# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11668 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11669# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11670 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11671#else
11672# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11673 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11674# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11675 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11676# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11677 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11678# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11679 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11680#endif
11681
11682#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11683#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11684#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11685#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11686#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11687#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11688#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11689 do { \
11690 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11691 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11692 } while (0)
11693
11694#ifndef IEM_WITH_SETJMP
11695# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11696 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11697# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11698 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11699#else
11700# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11701 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11702# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11703 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11704#endif
11705
11706
11707#define IEM_MC_PUSH_U16(a_u16Value) \
11708 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11709#define IEM_MC_PUSH_U32(a_u32Value) \
11710 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11711#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11712 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11713#define IEM_MC_PUSH_U64(a_u64Value) \
11714 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11715
11716#define IEM_MC_POP_U16(a_pu16Value) \
11717 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11718#define IEM_MC_POP_U32(a_pu32Value) \
11719 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11720#define IEM_MC_POP_U64(a_pu64Value) \
11721 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11722
11723/** Maps guest memory for direct or bounce buffered access.
11724 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11725 * @remarks May return.
11726 */
11727#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11728 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11729
11730/** Maps guest memory for direct or bounce buffered access.
11731 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11732 * @remarks May return.
11733 */
11734#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11735 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11736
11737/** Commits the memory and unmaps the guest memory.
11738 * @remarks May return.
11739 */
11740#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11741 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
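
/* Illustrative sketch (editorial addition, not from the original source) of the
 * map/commit pairing for a read-modify-write memory operand; bRm and the elided
 * worker call are assumed to come from the usual decoding steps and are only
 * placeholders here.
 *
 * @code
 *      IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 *      IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      // ... modify *pu16Dst via an assembly worker ...
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 * @endcode
 */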
11742
11743/** Commits the memory and unmaps the guest memory unless the FPU status word
11744 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11745 * would cause FLD not to store.
11746 *
11747 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11748 * store, while \#P will not.
11749 *
11750 * @remarks May in theory return - for now.
11751 */
11752#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11753 do { \
11754 if ( !(a_u16FSW & X86_FSW_ES) \
11755 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11756 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11757 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11758 } while (0)
11759
11760/** Calculate effective address from R/M. */
11761#ifndef IEM_WITH_SETJMP
11762# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11763 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11764#else
11765# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11766 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11767#endif
11768
11769#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11770#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11771#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11772#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11773#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11774#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11775#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11776
11777/**
11778 * Defers the rest of the instruction emulation to a C implementation routine
11779 * and returns, only taking the standard parameters.
11780 *
11781 * @param a_pfnCImpl The pointer to the C routine.
11782 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11783 */
11784#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11785
11786/**
11787 * Defers the rest of the instruction emulation to a C implementation routine and
11788 * returns, taking one argument in addition to the standard ones.
11789 *
11790 * @param a_pfnCImpl The pointer to the C routine.
11791 * @param a0 The argument.
11792 */
11793#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11794
11795/**
11796 * Defers the rest of the instruction emulation to a C implementation routine
11797 * and returns, taking two arguments in addition to the standard ones.
11798 *
11799 * @param a_pfnCImpl The pointer to the C routine.
11800 * @param a0 The first extra argument.
11801 * @param a1 The second extra argument.
11802 */
11803#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11804
11805/**
11806 * Defers the rest of the instruction emulation to a C implementation routine
11807 * and returns, taking three arguments in addition to the standard ones.
11808 *
11809 * @param a_pfnCImpl The pointer to the C routine.
11810 * @param a0 The first extra argument.
11811 * @param a1 The second extra argument.
11812 * @param a2 The third extra argument.
11813 */
11814#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11815
11816/**
11817 * Defers the rest of the instruction emulation to a C implementation routine
11818 * and returns, taking four arguments in addition to the standard ones.
11819 *
11820 * @param a_pfnCImpl The pointer to the C routine.
11821 * @param a0 The first extra argument.
11822 * @param a1 The second extra argument.
11823 * @param a2 The third extra argument.
11824 * @param a3 The fourth extra argument.
11825 */
11826#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11827
11828/**
11829 * Defers the rest of the instruction emulation to a C implementation routine
11830 * and returns, taking five arguments in addition to the standard ones.
11831 *
11832 * @param a_pfnCImpl The pointer to the C routine.
11833 * @param a0 The first extra argument.
11834 * @param a1 The second extra argument.
11835 * @param a2 The third extra argument.
11836 * @param a3 The fourth extra argument.
11837 * @param a4 The fifth extra argument.
11838 */
11839#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11840
11841/**
11842 * Defers the entire instruction emulation to a C implementation routine and
11843 * returns, only taking the standard parameters.
11844 *
11845 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11846 *
11847 * @param a_pfnCImpl The pointer to the C routine.
11848 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11849 */
11850#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11851
11852/**
11853 * Defers the entire instruction emulation to a C implementation routine and
11854 * returns, taking one argument in addition to the standard ones.
11855 *
11856 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11857 *
11858 * @param a_pfnCImpl The pointer to the C routine.
11859 * @param a0 The argument.
11860 */
11861#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11862
11863/**
11864 * Defers the entire instruction emulation to a C implementation routine and
11865 * returns, taking two arguments in addition to the standard ones.
11866 *
11867 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11868 *
11869 * @param a_pfnCImpl The pointer to the C routine.
11870 * @param a0 The first extra argument.
11871 * @param a1 The second extra argument.
11872 */
11873#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11874
11875/**
11876 * Defers the entire instruction emulation to a C implementation routine and
11877 * returns, taking three arguments in addition to the standard ones.
11878 *
11879 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11880 *
11881 * @param a_pfnCImpl The pointer to the C routine.
11882 * @param a0 The first extra argument.
11883 * @param a1 The second extra argument.
11884 * @param a2 The third extra argument.
11885 */
11886#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
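
/* Illustrative sketch (editorial addition, not from the original source): the
 * CALL_CIMPL variants are used from inside an IEM_MC_BEGIN/IEM_MC_END block after
 * decoding, while the DEFER_TO_CIMPL variants make up the entire opcode function
 * body. The C implementation routine name (iemCImpl_SomethingEx) and its argument
 * are invented for the example.
 *
 * @code
 *      // inside a microcode block (this macro returns from the opcode function):
 *      IEM_MC_CALL_CIMPL_1(iemCImpl_SomethingEx, uArg);
 *
 *      // as the whole opcode function body, with no IEM_MC_BEGIN/IEM_MC_END around it:
 *      return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_SomethingEx, uArg);
 * @endcode
 */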
11887
11888/**
11889 * Calls a FPU assembly implementation taking one visible argument.
11890 *
11891 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11892 * @param a0 The first extra argument.
11893 */
11894#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11895 do { \
11896 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11897 } while (0)
11898
11899/**
11900 * Calls a FPU assembly implementation taking two visible arguments.
11901 *
11902 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11903 * @param a0 The first extra argument.
11904 * @param a1 The second extra argument.
11905 */
11906#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11907 do { \
11908 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11909 } while (0)
11910
11911/**
11912 * Calls a FPU assembly implementation taking three visible arguments.
11913 *
11914 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11915 * @param a0 The first extra argument.
11916 * @param a1 The second extra argument.
11917 * @param a2 The third extra argument.
11918 */
11919#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11920 do { \
11921 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11922 } while (0)
11923
11924#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11925 do { \
11926 (a_FpuData).FSW = (a_FSW); \
11927 (a_FpuData).r80Result = *(a_pr80Value); \
11928 } while (0)
11929
11930/** Pushes FPU result onto the stack. */
11931#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11932 iemFpuPushResult(pVCpu, &a_FpuData)
11933/** Pushes FPU result onto the stack and sets the FPUDP. */
11934#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11935 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11936
11937/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
11938#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11939 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11940
11941/** Stores FPU result in a stack register. */
11942#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11943 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11944/** Stores FPU result in a stack register and pops the stack. */
11945#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11946 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11947/** Stores FPU result in a stack register and sets the FPUDP. */
11948#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11949 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11950/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11951 * stack. */
11952#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11953 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
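
/* Illustrative sketch (editorial addition, not from the original source) of how the
 * FPU result helpers are combined for a two-operand FPU instruction; the stack
 * register fetch step is elided and the worker name (iemAImpl_fadd_r80_by_r80) is
 * assumed to be one of the assembly helpers, so treat the exact wiring as a sketch.
 *
 * @code
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,      1);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,      2);
 *      IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *      IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *      // ... obtain pr80Value1/pr80Value2 from the FPU stack registers ...
 *      IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *      IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ADVANCE_RIP();
 * @endcode
 */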
11954
11955/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11956#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11957 iemFpuUpdateOpcodeAndIp(pVCpu)
11958/** Free a stack register (for FFREE and FFREEP). */
11959#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11960 iemFpuStackFree(pVCpu, a_iStReg)
11961/** Increment the FPU stack pointer. */
11962#define IEM_MC_FPU_STACK_INC_TOP() \
11963 iemFpuStackIncTop(pVCpu)
11964/** Decrement the FPU stack pointer. */
11965#define IEM_MC_FPU_STACK_DEC_TOP() \
11966 iemFpuStackDecTop(pVCpu)
11967
11968/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11969#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11970 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11971/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11972#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11973 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11974/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11975#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11976 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11977/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11978#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11979 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11980/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11981 * stack. */
11982#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11983 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11984/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11985#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11986 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11987
11988/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11989#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11990 iemFpuStackUnderflow(pVCpu, a_iStDst)
11991/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11992 * stack. */
11993#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11994 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11995/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11996 * FPUDS. */
11997#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11998 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11999/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12000 * FPUDS. Pops stack. */
12001#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12002 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12003/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12004 * stack twice. */
12005#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12006 iemFpuStackUnderflowThenPopPop(pVCpu)
12007/** Raises a FPU stack underflow exception for an instruction pushing a result
12008 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12009#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12010 iemFpuStackPushUnderflow(pVCpu)
12011/** Raises a FPU stack underflow exception for an instruction pushing a result
12012 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12013#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12014 iemFpuStackPushUnderflowTwo(pVCpu)
12015
12016/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12017 * FPUIP, FPUCS and FOP. */
12018#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12019 iemFpuStackPushOverflow(pVCpu)
12020/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12021 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12022#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12023 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12024/** Prepares for using the FPU state.
12025 * Ensures that we can use the host FPU in the current context (RC+R0).
12026 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12027#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12028/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12029#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12030/** Actualizes the guest FPU state so it can be accessed and modified. */
12031#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12032
12033/** Prepares for using the SSE state.
12034 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12035 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12036#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12037/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12038#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12039/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12040#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12041
12042/** Prepares for using the AVX state.
12043 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12044 * Ensures the guest AVX state in the CPUMCTX is up to date.
12045 * @note This will include the AVX-512 state too when support for it is added,
12046 * due to the zero-extending behaviour of VEX-encoded instructions. */
12047#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12048/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12049#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12050/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12051#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12052
12053/**
12054 * Calls a MMX assembly implementation taking two visible arguments.
12055 *
12056 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12057 * @param a0 The first extra argument.
12058 * @param a1 The second extra argument.
12059 */
12060#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12061 do { \
12062 IEM_MC_PREPARE_FPU_USAGE(); \
12063 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12064 } while (0)
12065
12066/**
12067 * Calls a MMX assembly implementation taking three visible arguments.
12068 *
12069 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12070 * @param a0 The first extra argument.
12071 * @param a1 The second extra argument.
12072 * @param a2 The third extra argument.
12073 */
12074#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12075 do { \
12076 IEM_MC_PREPARE_FPU_USAGE(); \
12077 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12078 } while (0)
12079
12080
12081/**
12082 * Calls a SSE assembly implementation taking two visible arguments.
12083 *
12084 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12085 * @param a0 The first extra argument.
12086 * @param a1 The second extra argument.
12087 */
12088#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12089 do { \
12090 IEM_MC_PREPARE_SSE_USAGE(); \
12091 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12092 } while (0)
12093
12094/**
12095 * Calls a SSE assembly implementation taking three visible arguments.
12096 *
12097 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12098 * @param a0 The first extra argument.
12099 * @param a1 The second extra argument.
12100 * @param a2 The third extra argument.
12101 */
12102#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12103 do { \
12104 IEM_MC_PREPARE_SSE_USAGE(); \
12105 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12106 } while (0)
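/*
 * Illustrative sketch only (not verbatim from IEMAllInstructions.cpp.h): how the
 * SSE call macro above is typically combined with IEM_MC_PREPARE_SSE_USAGE for a
 * register-to-register binary op.  bRm is the ModR/M byte from the decoder; the
 * worker name iemAImpl_pxor_u128 and the IEM_MC_REF_XREG_U128* /
 * IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT macros are assumed to be defined
 * elsewhere in IEM.
 */
#if 0 /* example sketch, not compiled */
    IEM_MC_BEGIN(2, 0);
    IEM_MC_ARG(PRTUINT128U,  pDst, 0);
    IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
    IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
    IEM_MC_PREPARE_SSE_USAGE();         /* load the guest SSE state before the worker runs */
    IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_MASK) | pVCpu->iem.s.uRexReg);
    IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
    IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pxor_u128, pDst, pSrc);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif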
12107
12108/** @note Not for IOPL or IF testing. */
12109#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
12110/** @note Not for IOPL or IF testing. */
12111#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
12112/** @note Not for IOPL or IF testing. */
12113#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
12114/** @note Not for IOPL or IF testing. */
12115#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
12116/** @note Not for IOPL or IF testing. */
12117#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12118 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12119 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12120/** @note Not for IOPL or IF testing. */
12121#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12122 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12123 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12124/** @note Not for IOPL or IF testing. */
12125#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12126 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12127 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12128 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12129/** @note Not for IOPL or IF testing. */
12130#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12131 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12132 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12133 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12134#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
12135#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
12136#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
12137/** @note Not for IOPL or IF testing. */
12138#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12139 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12140 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12141/** @note Not for IOPL or IF testing. */
12142#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12143 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12144 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12145/** @note Not for IOPL or IF testing. */
12146#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12147 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12148 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12149/** @note Not for IOPL or IF testing. */
12150#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12151 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12152 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12153/** @note Not for IOPL or IF testing. */
12154#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12155 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12156 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12157/** @note Not for IOPL or IF testing. */
12158#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12159 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12160 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12161#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12162#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12163
12164#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12165 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12166#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12167 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12168#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12169 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12170#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12171 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12172#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12173 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12174#define IEM_MC_IF_FCW_IM() \
12175 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12176
12177#define IEM_MC_ELSE() } else {
12178#define IEM_MC_ENDIF() } do {} while (0)
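/*
 * Illustrative sketch only (patterned after, but not copied verbatim from, the
 * helpers in IEMAllInstructions.cpp.h): a typical "fxxx st0,stN" body tying
 * together the FPU call, result and conditional macros above.  bRm is the
 * ModR/M byte from the decoder and the worker name iemAImpl_fadd_r80_by_r80 is
 * assumed.
 */
#if 0 /* example sketch, not compiled */
    IEM_MC_BEGIN(3, 1);
    IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,      1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,      2);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
        IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);  /* an empty source register becomes a stack underflow */
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif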
12179
12180/** @} */
12181
12182
12183/** @name Opcode Debug Helpers.
12184 * @{
12185 */
12186#ifdef VBOX_WITH_STATISTICS
12187# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12188#else
12189# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12190#endif
12191
12192#ifdef DEBUG
12193# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12194 do { \
12195 IEMOP_INC_STATS(a_Stats); \
12196 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
12197 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12198 } while (0)
12199
12200# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12201 do { \
12202 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12203 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12204 (void)RT_CONCAT(OP_,a_Upper); \
12205 (void)(a_fDisHints); \
12206 (void)(a_fIemHints); \
12207 } while (0)
12208
12209# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12210 do { \
12211 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12212 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12213 (void)RT_CONCAT(OP_,a_Upper); \
12214 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12215 (void)(a_fDisHints); \
12216 (void)(a_fIemHints); \
12217 } while (0)
12218
12219# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12220 do { \
12221 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12222 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12223 (void)RT_CONCAT(OP_,a_Upper); \
12224 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12225 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12226 (void)(a_fDisHints); \
12227 (void)(a_fIemHints); \
12228 } while (0)
12229
12230# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12231 do { \
12232 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12233 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12234 (void)RT_CONCAT(OP_,a_Upper); \
12235 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12236 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12237 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12238 (void)(a_fDisHints); \
12239 (void)(a_fIemHints); \
12240 } while (0)
12241
12242# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12243 do { \
12244 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12245 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12246 (void)RT_CONCAT(OP_,a_Upper); \
12247 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12248 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12249 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12250 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12251 (void)(a_fDisHints); \
12252 (void)(a_fIemHints); \
12253 } while (0)
12254
12255#else
12256# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12257
12258# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12259 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12260# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12261 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12262# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12263 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12264# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12265 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12266# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12267 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12268
12269#endif
12270
12271#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12272 IEMOP_MNEMONIC0EX(a_Lower, \
12273 #a_Lower, \
12274 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12275#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12276 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12277 #a_Lower " " #a_Op1, \
12278 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12279#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12280 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12281 #a_Lower " " #a_Op1 "," #a_Op2, \
12282 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12283#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12284 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12285 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12286 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12287#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12288 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12289 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12290 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
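/*
 * Illustrative sketch only: a decoder function would typically invoke one of the
 * wrappers above right after fetching the opcode, e.g. for a Gv,Ev form
 * instruction (the DISOPTYPE_HARMLESS hint value is assumed here):
 *
 *      IEMOP_MNEMONIC2(RM, XOR, xor, Gv, Ev, DISOPTYPE_HARMLESS, 0);
 *
 * This bumps the xor_Gv_Ev statistics counter and, in debug builds, emits the
 * Log4 decode line while compile-time checking the IEMOPFORM_RM, OP_XOR and
 * OP_PARM_Gv/OP_PARM_Ev constants.
 */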
12291
12292/** @} */
12293
12294
12295/** @name Opcode Helpers.
12296 * @{
12297 */
12298
12299#ifdef IN_RING3
12300# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12301 do { \
12302 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12303 else \
12304 { \
12305 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12306 return IEMOP_RAISE_INVALID_OPCODE(); \
12307 } \
12308 } while (0)
12309#else
12310# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12311 do { \
12312 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12313 else return IEMOP_RAISE_INVALID_OPCODE(); \
12314 } while (0)
12315#endif
12316
12317/** The instruction requires a 186 or later. */
12318#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12319# define IEMOP_HLP_MIN_186() do { } while (0)
12320#else
12321# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12322#endif
12323
12324/** The instruction requires a 286 or later. */
12325#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12326# define IEMOP_HLP_MIN_286() do { } while (0)
12327#else
12328# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12329#endif
12330
12331/** The instruction requires a 386 or later. */
12332#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12333# define IEMOP_HLP_MIN_386() do { } while (0)
12334#else
12335# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12336#endif
12337
12338/** The instruction requires a 386 or later if the given expression is true. */
12339#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12340# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12341#else
12342# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12343#endif
12344
12345/** The instruction requires a 486 or later. */
12346#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12347# define IEMOP_HLP_MIN_486() do { } while (0)
12348#else
12349# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12350#endif
12351
12352/** The instruction requires a Pentium (586) or later. */
12353#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12354# define IEMOP_HLP_MIN_586() do { } while (0)
12355#else
12356# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12357#endif
12358
12359/** The instruction requires a PentiumPro (686) or later. */
12360#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12361# define IEMOP_HLP_MIN_686() do { } while (0)
12362#else
12363# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12364#endif
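/*
 * Illustrative sketch only: an opcode routine for an instruction introduced with
 * the 486 (bswap, for instance) would start out roughly like this, so that older
 * target CPU profiles raise \#UD instead of executing it.  The function name is
 * hypothetical.
 */
#if 0 /* example sketch, not compiled */
FNIEMOP_DEF(iemOp_bswap_sketch)
{
    IEMOP_HLP_MIN_486();                /* \#UD on 386 and earlier target CPUs */
    /* ... decode and IEM_MC block for the actual operation ... */
    return VINF_SUCCESS;
}
#endif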
12365
12366
12367/** The instruction raises an \#UD in real and V8086 mode. */
12368#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12369 do \
12370 { \
12371 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12372 else return IEMOP_RAISE_INVALID_OPCODE(); \
12373 } while (0)
12374
12375/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12376 * 64-bit mode. */
12377#define IEMOP_HLP_NO_64BIT() \
12378 do \
12379 { \
12380 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12381 return IEMOP_RAISE_INVALID_OPCODE(); \
12382 } while (0)
12383
12384/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12385 * 64-bit mode. */
12386#define IEMOP_HLP_ONLY_64BIT() \
12387 do \
12388 { \
12389 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12390 return IEMOP_RAISE_INVALID_OPCODE(); \
12391 } while (0)
12392
12393/** The instruction defaults to 64-bit operand size in 64-bit mode. */
12394#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12395 do \
12396 { \
12397 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12398 iemRecalEffOpSize64Default(pVCpu); \
12399 } while (0)
12400
12401/** The instruction has 64-bit operand size in 64-bit mode. */
12402#define IEMOP_HLP_64BIT_OP_SIZE() \
12403 do \
12404 { \
12405 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12406 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12407 } while (0)
12408
12409/** Only a REX prefix immediately preceding the first opcode byte takes
12410 * effect. This macro helps ensure this as well as logging bad guest code. */
12411#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12412 do \
12413 { \
12414 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12415 { \
12416 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12417 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12418 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12419 pVCpu->iem.s.uRexB = 0; \
12420 pVCpu->iem.s.uRexIndex = 0; \
12421 pVCpu->iem.s.uRexReg = 0; \
12422 iemRecalEffOpSize(pVCpu); \
12423 } \
12424 } while (0)
12425
12426/**
12427 * Done decoding.
12428 */
12429#define IEMOP_HLP_DONE_DECODING() \
12430 do \
12431 { \
12432 /* nothing for now, maybe later... */ \
12433 } while (0)
12434
12435/**
12436 * Done decoding, raise \#UD exception if lock prefix present.
12437 */
12438#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12439 do \
12440 { \
12441 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12442 { /* likely */ } \
12443 else \
12444 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12445 } while (0)
12446
12447
12448/**
12449 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12450 * repnz or size prefixes are present, or if in real or v8086 mode.
12451 */
12452#define IEMOP_HLP_DONE_DECODING_NO_AVX_PREFIX() \
12453 do \
12454 { \
12455 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12456 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12457 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12458 { /* likely */ } \
12459 else \
12460 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12461 } while (0)
12462
12463
12464/**
12465 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12466 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12467 * register 0, or if in real or v8086 mode.
12468 */
12469#define IEMOP_HLP_DONE_DECODING_NO_AVX_PREFIX_AND_NO_VVVV() \
12470 do \
12471 { \
12472 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12473 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12474 && !pVCpu->iem.s.uVex3rdReg \
12475 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12476 { /* likely */ } \
12477 else \
12478 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12479 } while (0)
12480
12481#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12482 do \
12483 { \
12484 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12485 { /* likely */ } \
12486 else \
12487 { \
12488 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12489 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12490 } \
12491 } while (0)
12492#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12493 do \
12494 { \
12495 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12496 { /* likely */ } \
12497 else \
12498 { \
12499 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12500 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12501 } \
12502 } while (0)
12503
12504/**
12505 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12506 * are present.
12507 */
12508#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12509 do \
12510 { \
12511 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12512 { /* likely */ } \
12513 else \
12514 return IEMOP_RAISE_INVALID_OPCODE(); \
12515 } while (0)
12516
12517
12518/**
12519 * Done decoding VEX.
12520 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, or if
12521 * we're in real or v8086 mode.
12522 */
12523#define IEMOP_HLP_DONE_VEX_DECODING() \
12524 do \
12525 { \
12526 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12527 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12528 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12529 { /* likely */ } \
12530 else \
12531 return IEMOP_RAISE_INVALID_OPCODE(); \
12532 } while (0)
12533
12534/**
12535 * Done decoding VEX, no V, no L.
12536 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12537 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12538 */
12539#define IEMOP_HLP_DONE_VEX_DECODING_L_ZERO_NO_VVV() \
12540 do \
12541 { \
12542 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12543 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12544 && pVCpu->iem.s.uVexLength == 0 \
12545 && pVCpu->iem.s.uVex3rdReg == 0 \
12546 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12547 { /* likely */ } \
12548 else \
12549 return IEMOP_RAISE_INVALID_OPCODE(); \
12550 } while (0)
12551
12552#ifdef VBOX_WITH_NESTED_HWVIRT
12553/** Checks and handles the SVM nested-guest control & instruction intercept. */
12554# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12555 do \
12556 { \
12557 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12558 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12559 } while (0)
12560
12561/** Checks and handles the SVM nested-guest CR read intercept. */
12562# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12563 do \
12564 { \
12565 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12566 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12567 } while (0)
12568
12569#else
12570# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12571# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12572
12573#endif /* VBOX_WITH_NESTED_HWVIRT */
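/*
 * Illustrative sketch only: an instruction body that an SVM nested guest can
 * intercept would typically check the control intercept right after decoding,
 * e.g. (the SVM_CTRL_INTERCEPT_WBINVD / SVM_EXIT_WBINVD names are assumed):
 *
 *      IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD, 0, 0);
 *
 * With VBOX_WITH_NESTED_HWVIRT undefined both helpers compile away to nothing,
 * so the check costs nothing in builds without nested hardware virtualization.
 */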
12574
12575
12576/**
12577 * Calculates the effective address of a ModR/M memory operand.
12578 *
12579 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12580 *
12581 * @return Strict VBox status code.
12582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12583 * @param bRm The ModRM byte.
12584 * @param cbImm The size of any immediate following the
12585 * effective address opcode bytes. Important for
12586 * RIP relative addressing.
12587 * @param pGCPtrEff Where to return the effective address.
12588 */
12589IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12590{
12591 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12592 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12593# define SET_SS_DEF() \
12594 do \
12595 { \
12596 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12597 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12598 } while (0)
12599
12600 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12601 {
12602/** @todo Check the effective address size crap! */
12603 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12604 {
12605 uint16_t u16EffAddr;
12606
12607 /* Handle the disp16 form with no registers first. */
12608 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12609 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12610 else
12611 {
12612 /* Get the displacement. */
12613 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12614 {
12615 case 0: u16EffAddr = 0; break;
12616 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12617 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12618 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12619 }
12620
12621 /* Add the base and index registers to the disp. */
12622 switch (bRm & X86_MODRM_RM_MASK)
12623 {
12624 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12625 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12626 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12627 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12628 case 4: u16EffAddr += pCtx->si; break;
12629 case 5: u16EffAddr += pCtx->di; break;
12630 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12631 case 7: u16EffAddr += pCtx->bx; break;
12632 }
12633 }
12634
12635 *pGCPtrEff = u16EffAddr;
12636 }
12637 else
12638 {
12639 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12640 uint32_t u32EffAddr;
12641
12642 /* Handle the disp32 form with no registers first. */
12643 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12644 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12645 else
12646 {
12647 /* Get the register (or SIB) value. */
12648 switch ((bRm & X86_MODRM_RM_MASK))
12649 {
12650 case 0: u32EffAddr = pCtx->eax; break;
12651 case 1: u32EffAddr = pCtx->ecx; break;
12652 case 2: u32EffAddr = pCtx->edx; break;
12653 case 3: u32EffAddr = pCtx->ebx; break;
12654 case 4: /* SIB */
12655 {
12656 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12657
12658 /* Get the index and scale it. */
12659 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12660 {
12661 case 0: u32EffAddr = pCtx->eax; break;
12662 case 1: u32EffAddr = pCtx->ecx; break;
12663 case 2: u32EffAddr = pCtx->edx; break;
12664 case 3: u32EffAddr = pCtx->ebx; break;
12665 case 4: u32EffAddr = 0; /*none */ break;
12666 case 5: u32EffAddr = pCtx->ebp; break;
12667 case 6: u32EffAddr = pCtx->esi; break;
12668 case 7: u32EffAddr = pCtx->edi; break;
12669 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12670 }
12671 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12672
12673 /* add base */
12674 switch (bSib & X86_SIB_BASE_MASK)
12675 {
12676 case 0: u32EffAddr += pCtx->eax; break;
12677 case 1: u32EffAddr += pCtx->ecx; break;
12678 case 2: u32EffAddr += pCtx->edx; break;
12679 case 3: u32EffAddr += pCtx->ebx; break;
12680 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12681 case 5:
12682 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12683 {
12684 u32EffAddr += pCtx->ebp;
12685 SET_SS_DEF();
12686 }
12687 else
12688 {
12689 uint32_t u32Disp;
12690 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12691 u32EffAddr += u32Disp;
12692 }
12693 break;
12694 case 6: u32EffAddr += pCtx->esi; break;
12695 case 7: u32EffAddr += pCtx->edi; break;
12696 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12697 }
12698 break;
12699 }
12700 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12701 case 6: u32EffAddr = pCtx->esi; break;
12702 case 7: u32EffAddr = pCtx->edi; break;
12703 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12704 }
12705
12706 /* Get and add the displacement. */
12707 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12708 {
12709 case 0:
12710 break;
12711 case 1:
12712 {
12713 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12714 u32EffAddr += i8Disp;
12715 break;
12716 }
12717 case 2:
12718 {
12719 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12720 u32EffAddr += u32Disp;
12721 break;
12722 }
12723 default:
12724 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12725 }
12726
12727 }
12728 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12729 *pGCPtrEff = u32EffAddr;
12730 else
12731 {
12732 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12733 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12734 }
12735 }
12736 }
12737 else
12738 {
12739 uint64_t u64EffAddr;
12740
12741 /* Handle the rip+disp32 form with no registers first. */
12742 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12743 {
12744 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12745 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12746 }
12747 else
12748 {
12749 /* Get the register (or SIB) value. */
12750 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12751 {
12752 case 0: u64EffAddr = pCtx->rax; break;
12753 case 1: u64EffAddr = pCtx->rcx; break;
12754 case 2: u64EffAddr = pCtx->rdx; break;
12755 case 3: u64EffAddr = pCtx->rbx; break;
12756 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12757 case 6: u64EffAddr = pCtx->rsi; break;
12758 case 7: u64EffAddr = pCtx->rdi; break;
12759 case 8: u64EffAddr = pCtx->r8; break;
12760 case 9: u64EffAddr = pCtx->r9; break;
12761 case 10: u64EffAddr = pCtx->r10; break;
12762 case 11: u64EffAddr = pCtx->r11; break;
12763 case 13: u64EffAddr = pCtx->r13; break;
12764 case 14: u64EffAddr = pCtx->r14; break;
12765 case 15: u64EffAddr = pCtx->r15; break;
12766 /* SIB */
12767 case 4:
12768 case 12:
12769 {
12770 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12771
12772 /* Get the index and scale it. */
12773 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12774 {
12775 case 0: u64EffAddr = pCtx->rax; break;
12776 case 1: u64EffAddr = pCtx->rcx; break;
12777 case 2: u64EffAddr = pCtx->rdx; break;
12778 case 3: u64EffAddr = pCtx->rbx; break;
12779 case 4: u64EffAddr = 0; /*none */ break;
12780 case 5: u64EffAddr = pCtx->rbp; break;
12781 case 6: u64EffAddr = pCtx->rsi; break;
12782 case 7: u64EffAddr = pCtx->rdi; break;
12783 case 8: u64EffAddr = pCtx->r8; break;
12784 case 9: u64EffAddr = pCtx->r9; break;
12785 case 10: u64EffAddr = pCtx->r10; break;
12786 case 11: u64EffAddr = pCtx->r11; break;
12787 case 12: u64EffAddr = pCtx->r12; break;
12788 case 13: u64EffAddr = pCtx->r13; break;
12789 case 14: u64EffAddr = pCtx->r14; break;
12790 case 15: u64EffAddr = pCtx->r15; break;
12791 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12792 }
12793 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12794
12795 /* add base */
12796 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12797 {
12798 case 0: u64EffAddr += pCtx->rax; break;
12799 case 1: u64EffAddr += pCtx->rcx; break;
12800 case 2: u64EffAddr += pCtx->rdx; break;
12801 case 3: u64EffAddr += pCtx->rbx; break;
12802 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12803 case 6: u64EffAddr += pCtx->rsi; break;
12804 case 7: u64EffAddr += pCtx->rdi; break;
12805 case 8: u64EffAddr += pCtx->r8; break;
12806 case 9: u64EffAddr += pCtx->r9; break;
12807 case 10: u64EffAddr += pCtx->r10; break;
12808 case 11: u64EffAddr += pCtx->r11; break;
12809 case 12: u64EffAddr += pCtx->r12; break;
12810 case 14: u64EffAddr += pCtx->r14; break;
12811 case 15: u64EffAddr += pCtx->r15; break;
12812 /* complicated encodings */
12813 case 5:
12814 case 13:
12815 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12816 {
12817 if (!pVCpu->iem.s.uRexB)
12818 {
12819 u64EffAddr += pCtx->rbp;
12820 SET_SS_DEF();
12821 }
12822 else
12823 u64EffAddr += pCtx->r13;
12824 }
12825 else
12826 {
12827 uint32_t u32Disp;
12828 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12829 u64EffAddr += (int32_t)u32Disp;
12830 }
12831 break;
12832 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12833 }
12834 break;
12835 }
12836 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12837 }
12838
12839 /* Get and add the displacement. */
12840 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12841 {
12842 case 0:
12843 break;
12844 case 1:
12845 {
12846 int8_t i8Disp;
12847 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12848 u64EffAddr += i8Disp;
12849 break;
12850 }
12851 case 2:
12852 {
12853 uint32_t u32Disp;
12854 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12855 u64EffAddr += (int32_t)u32Disp;
12856 break;
12857 }
12858 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12859 }
12860
12861 }
12862
12863 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12864 *pGCPtrEff = u64EffAddr;
12865 else
12866 {
12867 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12868 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12869 }
12870 }
12871
12872 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12873 return VINF_SUCCESS;
12874}
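/*
 * Self-contained sketch of the 16-bit ModR/M arithmetic implemented above
 * (hypothetical helper, not used by IEM): mod=1, rm=2 selects bp+si+disp8, so
 * with bp=0x1000, si=0x0020 and disp8=-4 the effective address is 0x101C, and
 * the result wraps at 64KiB just like the uint16_t sums in the real code.
 */
#if 0 /* example sketch, not compiled */
#include <stdint.h>
static uint16_t iemDemoCalcEffAddr16(uint16_t uBp, uint16_t uSi, int8_t i8Disp)
{
    return (uint16_t)(uBp + uSi + (int16_t)i8Disp); /* same wrap-around as u16EffAddr above */
}
#endif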
12875
12876
12877/**
12878 * Calculates the effective address of a ModR/M memory operand.
12879 *
12880 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12881 *
12882 * @return Strict VBox status code.
12883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12884 * @param bRm The ModRM byte.
12885 * @param cbImm The size of any immediate following the
12886 * effective address opcode bytes. Important for
12887 * RIP relative addressing.
12888 * @param pGCPtrEff Where to return the effective address.
12889 * @param offRsp RSP displacement.
12890 */
12891IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
12892{
12893 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12894 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12895# define SET_SS_DEF() \
12896 do \
12897 { \
12898 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12899 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12900 } while (0)
12901
12902 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12903 {
12904/** @todo Check the effective address size crap! */
12905 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12906 {
12907 uint16_t u16EffAddr;
12908
12909 /* Handle the disp16 form with no registers first. */
12910 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12911 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12912 else
12913 {
12914 /* Get the displacement. */
12915 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12916 {
12917 case 0: u16EffAddr = 0; break;
12918 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12919 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12920 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12921 }
12922
12923 /* Add the base and index registers to the disp. */
12924 switch (bRm & X86_MODRM_RM_MASK)
12925 {
12926 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12927 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12928 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12929 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12930 case 4: u16EffAddr += pCtx->si; break;
12931 case 5: u16EffAddr += pCtx->di; break;
12932 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12933 case 7: u16EffAddr += pCtx->bx; break;
12934 }
12935 }
12936
12937 *pGCPtrEff = u16EffAddr;
12938 }
12939 else
12940 {
12941 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12942 uint32_t u32EffAddr;
12943
12944 /* Handle the disp32 form with no registers first. */
12945 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12946 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12947 else
12948 {
12949 /* Get the register (or SIB) value. */
12950 switch ((bRm & X86_MODRM_RM_MASK))
12951 {
12952 case 0: u32EffAddr = pCtx->eax; break;
12953 case 1: u32EffAddr = pCtx->ecx; break;
12954 case 2: u32EffAddr = pCtx->edx; break;
12955 case 3: u32EffAddr = pCtx->ebx; break;
12956 case 4: /* SIB */
12957 {
12958 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12959
12960 /* Get the index and scale it. */
12961 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12962 {
12963 case 0: u32EffAddr = pCtx->eax; break;
12964 case 1: u32EffAddr = pCtx->ecx; break;
12965 case 2: u32EffAddr = pCtx->edx; break;
12966 case 3: u32EffAddr = pCtx->ebx; break;
12967 case 4: u32EffAddr = 0; /*none */ break;
12968 case 5: u32EffAddr = pCtx->ebp; break;
12969 case 6: u32EffAddr = pCtx->esi; break;
12970 case 7: u32EffAddr = pCtx->edi; break;
12971 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12972 }
12973 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12974
12975 /* add base */
12976 switch (bSib & X86_SIB_BASE_MASK)
12977 {
12978 case 0: u32EffAddr += pCtx->eax; break;
12979 case 1: u32EffAddr += pCtx->ecx; break;
12980 case 2: u32EffAddr += pCtx->edx; break;
12981 case 3: u32EffAddr += pCtx->ebx; break;
12982 case 4:
12983 u32EffAddr += pCtx->esp + offRsp;
12984 SET_SS_DEF();
12985 break;
12986 case 5:
12987 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12988 {
12989 u32EffAddr += pCtx->ebp;
12990 SET_SS_DEF();
12991 }
12992 else
12993 {
12994 uint32_t u32Disp;
12995 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12996 u32EffAddr += u32Disp;
12997 }
12998 break;
12999 case 6: u32EffAddr += pCtx->esi; break;
13000 case 7: u32EffAddr += pCtx->edi; break;
13001 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13002 }
13003 break;
13004 }
13005 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13006 case 6: u32EffAddr = pCtx->esi; break;
13007 case 7: u32EffAddr = pCtx->edi; break;
13008 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13009 }
13010
13011 /* Get and add the displacement. */
13012 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13013 {
13014 case 0:
13015 break;
13016 case 1:
13017 {
13018 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13019 u32EffAddr += i8Disp;
13020 break;
13021 }
13022 case 2:
13023 {
13024 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13025 u32EffAddr += u32Disp;
13026 break;
13027 }
13028 default:
13029 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13030 }
13031
13032 }
13033 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13034 *pGCPtrEff = u32EffAddr;
13035 else
13036 {
13037 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13038 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13039 }
13040 }
13041 }
13042 else
13043 {
13044 uint64_t u64EffAddr;
13045
13046 /* Handle the rip+disp32 form with no registers first. */
13047 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13048 {
13049 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13050 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13051 }
13052 else
13053 {
13054 /* Get the register (or SIB) value. */
13055 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13056 {
13057 case 0: u64EffAddr = pCtx->rax; break;
13058 case 1: u64EffAddr = pCtx->rcx; break;
13059 case 2: u64EffAddr = pCtx->rdx; break;
13060 case 3: u64EffAddr = pCtx->rbx; break;
13061 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13062 case 6: u64EffAddr = pCtx->rsi; break;
13063 case 7: u64EffAddr = pCtx->rdi; break;
13064 case 8: u64EffAddr = pCtx->r8; break;
13065 case 9: u64EffAddr = pCtx->r9; break;
13066 case 10: u64EffAddr = pCtx->r10; break;
13067 case 11: u64EffAddr = pCtx->r11; break;
13068 case 13: u64EffAddr = pCtx->r13; break;
13069 case 14: u64EffAddr = pCtx->r14; break;
13070 case 15: u64EffAddr = pCtx->r15; break;
13071 /* SIB */
13072 case 4:
13073 case 12:
13074 {
13075 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13076
13077 /* Get the index and scale it. */
13078 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13079 {
13080 case 0: u64EffAddr = pCtx->rax; break;
13081 case 1: u64EffAddr = pCtx->rcx; break;
13082 case 2: u64EffAddr = pCtx->rdx; break;
13083 case 3: u64EffAddr = pCtx->rbx; break;
13084 case 4: u64EffAddr = 0; /*none */ break;
13085 case 5: u64EffAddr = pCtx->rbp; break;
13086 case 6: u64EffAddr = pCtx->rsi; break;
13087 case 7: u64EffAddr = pCtx->rdi; break;
13088 case 8: u64EffAddr = pCtx->r8; break;
13089 case 9: u64EffAddr = pCtx->r9; break;
13090 case 10: u64EffAddr = pCtx->r10; break;
13091 case 11: u64EffAddr = pCtx->r11; break;
13092 case 12: u64EffAddr = pCtx->r12; break;
13093 case 13: u64EffAddr = pCtx->r13; break;
13094 case 14: u64EffAddr = pCtx->r14; break;
13095 case 15: u64EffAddr = pCtx->r15; break;
13096 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13097 }
13098 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13099
13100 /* add base */
13101 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13102 {
13103 case 0: u64EffAddr += pCtx->rax; break;
13104 case 1: u64EffAddr += pCtx->rcx; break;
13105 case 2: u64EffAddr += pCtx->rdx; break;
13106 case 3: u64EffAddr += pCtx->rbx; break;
13107 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
13108 case 6: u64EffAddr += pCtx->rsi; break;
13109 case 7: u64EffAddr += pCtx->rdi; break;
13110 case 8: u64EffAddr += pCtx->r8; break;
13111 case 9: u64EffAddr += pCtx->r9; break;
13112 case 10: u64EffAddr += pCtx->r10; break;
13113 case 11: u64EffAddr += pCtx->r11; break;
13114 case 12: u64EffAddr += pCtx->r12; break;
13115 case 14: u64EffAddr += pCtx->r14; break;
13116 case 15: u64EffAddr += pCtx->r15; break;
13117 /* complicated encodings */
13118 case 5:
13119 case 13:
13120 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13121 {
13122 if (!pVCpu->iem.s.uRexB)
13123 {
13124 u64EffAddr += pCtx->rbp;
13125 SET_SS_DEF();
13126 }
13127 else
13128 u64EffAddr += pCtx->r13;
13129 }
13130 else
13131 {
13132 uint32_t u32Disp;
13133 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13134 u64EffAddr += (int32_t)u32Disp;
13135 }
13136 break;
13137 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13138 }
13139 break;
13140 }
13141 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13142 }
13143
13144 /* Get and add the displacement. */
13145 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13146 {
13147 case 0:
13148 break;
13149 case 1:
13150 {
13151 int8_t i8Disp;
13152 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13153 u64EffAddr += i8Disp;
13154 break;
13155 }
13156 case 2:
13157 {
13158 uint32_t u32Disp;
13159 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13160 u64EffAddr += (int32_t)u32Disp;
13161 break;
13162 }
13163 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13164 }
13165
13166 }
13167
13168 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13169 *pGCPtrEff = u64EffAddr;
13170 else
13171 {
13172 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13173 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13174 }
13175 }
13176
13177 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13178 return VINF_SUCCESS;
13179}
13180
13181
13182#ifdef IEM_WITH_SETJMP
13183/**
13184 * Calculates the effective address of a ModR/M memory operand.
13185 *
13186 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13187 *
13188 * May longjmp on internal error.
13189 *
13190 * @return The effective address.
13191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13192 * @param bRm The ModRM byte.
13193 * @param cbImm The size of any immediate following the
13194 * effective address opcode bytes. Important for
13195 * RIP relative addressing.
13196 */
13197IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13198{
13199 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13200 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13201# define SET_SS_DEF() \
13202 do \
13203 { \
13204 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13205 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13206 } while (0)
13207
13208 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13209 {
13210/** @todo Check the effective address size crap! */
13211 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13212 {
13213 uint16_t u16EffAddr;
13214
13215 /* Handle the disp16 form with no registers first. */
13216 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13217 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13218 else
13219 {
13220 /* Get the displacement. */
13221 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13222 {
13223 case 0: u16EffAddr = 0; break;
13224 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13225 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13226 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13227 }
13228
13229 /* Add the base and index registers to the disp. */
13230 switch (bRm & X86_MODRM_RM_MASK)
13231 {
13232 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13233 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13234 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13235 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13236 case 4: u16EffAddr += pCtx->si; break;
13237 case 5: u16EffAddr += pCtx->di; break;
13238 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13239 case 7: u16EffAddr += pCtx->bx; break;
13240 }
13241 }
13242
13243 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13244 return u16EffAddr;
13245 }
13246
13247 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13248 uint32_t u32EffAddr;
13249
13250 /* Handle the disp32 form with no registers first. */
13251 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13252 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13253 else
13254 {
13255 /* Get the register (or SIB) value. */
13256 switch ((bRm & X86_MODRM_RM_MASK))
13257 {
13258 case 0: u32EffAddr = pCtx->eax; break;
13259 case 1: u32EffAddr = pCtx->ecx; break;
13260 case 2: u32EffAddr = pCtx->edx; break;
13261 case 3: u32EffAddr = pCtx->ebx; break;
13262 case 4: /* SIB */
13263 {
13264 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13265
13266 /* Get the index and scale it. */
13267 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13268 {
13269 case 0: u32EffAddr = pCtx->eax; break;
13270 case 1: u32EffAddr = pCtx->ecx; break;
13271 case 2: u32EffAddr = pCtx->edx; break;
13272 case 3: u32EffAddr = pCtx->ebx; break;
13273 case 4: u32EffAddr = 0; /*none */ break;
13274 case 5: u32EffAddr = pCtx->ebp; break;
13275 case 6: u32EffAddr = pCtx->esi; break;
13276 case 7: u32EffAddr = pCtx->edi; break;
13277 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13278 }
13279 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13280
13281 /* add base */
13282 switch (bSib & X86_SIB_BASE_MASK)
13283 {
13284 case 0: u32EffAddr += pCtx->eax; break;
13285 case 1: u32EffAddr += pCtx->ecx; break;
13286 case 2: u32EffAddr += pCtx->edx; break;
13287 case 3: u32EffAddr += pCtx->ebx; break;
13288 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13289 case 5:
13290 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13291 {
13292 u32EffAddr += pCtx->ebp;
13293 SET_SS_DEF();
13294 }
13295 else
13296 {
13297 uint32_t u32Disp;
13298 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13299 u32EffAddr += u32Disp;
13300 }
13301 break;
13302 case 6: u32EffAddr += pCtx->esi; break;
13303 case 7: u32EffAddr += pCtx->edi; break;
13304 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13305 }
13306 break;
13307 }
13308 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13309 case 6: u32EffAddr = pCtx->esi; break;
13310 case 7: u32EffAddr = pCtx->edi; break;
13311 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13312 }
13313
13314 /* Get and add the displacement. */
13315 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13316 {
13317 case 0:
13318 break;
13319 case 1:
13320 {
13321 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13322 u32EffAddr += i8Disp;
13323 break;
13324 }
13325 case 2:
13326 {
13327 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13328 u32EffAddr += u32Disp;
13329 break;
13330 }
13331 default:
13332 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13333 }
13334 }
13335
13336 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13337 {
13338 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13339 return u32EffAddr;
13340 }
13341 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13342 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13343 return u32EffAddr & UINT16_MAX;
13344 }
13345
13346 uint64_t u64EffAddr;
13347
13348 /* Handle the rip+disp32 form with no registers first. */
13349 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13350 {
13351 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13352 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13353 }
13354 else
13355 {
13356 /* Get the register (or SIB) value. */
13357 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13358 {
13359 case 0: u64EffAddr = pCtx->rax; break;
13360 case 1: u64EffAddr = pCtx->rcx; break;
13361 case 2: u64EffAddr = pCtx->rdx; break;
13362 case 3: u64EffAddr = pCtx->rbx; break;
13363 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13364 case 6: u64EffAddr = pCtx->rsi; break;
13365 case 7: u64EffAddr = pCtx->rdi; break;
13366 case 8: u64EffAddr = pCtx->r8; break;
13367 case 9: u64EffAddr = pCtx->r9; break;
13368 case 10: u64EffAddr = pCtx->r10; break;
13369 case 11: u64EffAddr = pCtx->r11; break;
13370 case 13: u64EffAddr = pCtx->r13; break;
13371 case 14: u64EffAddr = pCtx->r14; break;
13372 case 15: u64EffAddr = pCtx->r15; break;
13373 /* SIB */
13374 case 4:
13375 case 12:
13376 {
13377 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13378
13379 /* Get the index and scale it. */
13380 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13381 {
13382 case 0: u64EffAddr = pCtx->rax; break;
13383 case 1: u64EffAddr = pCtx->rcx; break;
13384 case 2: u64EffAddr = pCtx->rdx; break;
13385 case 3: u64EffAddr = pCtx->rbx; break;
13386 case 4: u64EffAddr = 0; /*none */ break;
13387 case 5: u64EffAddr = pCtx->rbp; break;
13388 case 6: u64EffAddr = pCtx->rsi; break;
13389 case 7: u64EffAddr = pCtx->rdi; break;
13390 case 8: u64EffAddr = pCtx->r8; break;
13391 case 9: u64EffAddr = pCtx->r9; break;
13392 case 10: u64EffAddr = pCtx->r10; break;
13393 case 11: u64EffAddr = pCtx->r11; break;
13394 case 12: u64EffAddr = pCtx->r12; break;
13395 case 13: u64EffAddr = pCtx->r13; break;
13396 case 14: u64EffAddr = pCtx->r14; break;
13397 case 15: u64EffAddr = pCtx->r15; break;
13398 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13399 }
13400 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13401
13402 /* add base */
13403 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13404 {
13405 case 0: u64EffAddr += pCtx->rax; break;
13406 case 1: u64EffAddr += pCtx->rcx; break;
13407 case 2: u64EffAddr += pCtx->rdx; break;
13408 case 3: u64EffAddr += pCtx->rbx; break;
13409 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13410 case 6: u64EffAddr += pCtx->rsi; break;
13411 case 7: u64EffAddr += pCtx->rdi; break;
13412 case 8: u64EffAddr += pCtx->r8; break;
13413 case 9: u64EffAddr += pCtx->r9; break;
13414 case 10: u64EffAddr += pCtx->r10; break;
13415 case 11: u64EffAddr += pCtx->r11; break;
13416 case 12: u64EffAddr += pCtx->r12; break;
13417 case 14: u64EffAddr += pCtx->r14; break;
13418 case 15: u64EffAddr += pCtx->r15; break;
13419 /* complicated encodings */
13420 case 5:
13421 case 13:
13422 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13423 {
13424 if (!pVCpu->iem.s.uRexB)
13425 {
13426 u64EffAddr += pCtx->rbp;
13427 SET_SS_DEF();
13428 }
13429 else
13430 u64EffAddr += pCtx->r13;
13431 }
13432 else
13433 {
13434 uint32_t u32Disp;
13435 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13436 u64EffAddr += (int32_t)u32Disp;
13437 }
13438 break;
13439 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13440 }
13441 break;
13442 }
13443 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13444 }
13445
13446 /* Get and add the displacement. */
13447 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13448 {
13449 case 0:
13450 break;
13451 case 1:
13452 {
13453 int8_t i8Disp;
13454 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13455 u64EffAddr += i8Disp;
13456 break;
13457 }
13458 case 2:
13459 {
13460 uint32_t u32Disp;
13461 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13462 u64EffAddr += (int32_t)u32Disp;
13463 break;
13464 }
13465 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13466 }
13467
13468 }
13469
13470 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13471 {
13472 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13473 return u64EffAddr;
13474 }
13475 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13476 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13477 return u64EffAddr & UINT32_MAX;
13478}
13479#endif /* IEM_WITH_SETJMP */
13480
13481
13482/** @} */
13483
13484
13485
13486/*
13487 * Include the instructions
13488 */
13489#include "IEMAllInstructions.cpp.h"
13490
13491
13492
13493
13494#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13495
13496/**
13497 * Sets up execution verification mode.
13498 */
13499IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13500{
13501
13502 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13503
13504 /*
13505 * Always note down the address of the current instruction.
13506 */
13507 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13508 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13509
13510 /*
13511 * Enable verification and/or logging.
13512 */
13513	    bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
13514 if ( fNewNoRem
13515 && ( 0
13516#if 0 /* auto enable on first paged protected mode interrupt */
13517 || ( pOrgCtx->eflags.Bits.u1IF
13518 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13519 && TRPMHasTrap(pVCpu)
13520 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13521#endif
13522#if 0
13523	           || (   pOrgCtx->cs.Sel == 0x10
13524	               && (   pOrgCtx->rip == 0x90119e3e
13525	                   || pOrgCtx->rip == 0x901d9810))
13526#endif
13527#if 0 /* Auto enable DSL - FPU stuff. */
13528	           || (   pOrgCtx->cs.Sel == 0x10
13529 && (// pOrgCtx->rip == 0xc02ec07f
13530 //|| pOrgCtx->rip == 0xc02ec082
13531 //|| pOrgCtx->rip == 0xc02ec0c9
13532 0
13533 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13534#endif
13535#if 0 /* Auto enable DSL - fstp st0 stuff. */
13536	           || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13537#endif
13538#if 0
13539 || pOrgCtx->rip == 0x9022bb3a
13540#endif
13541#if 0
13542 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13543#endif
13544#if 0
13545 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13546 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13547#endif
13548#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
13549 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13550 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13551 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13552#endif
13553#if 0 /* NT4SP1 - xadd early boot. */
13554 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13555#endif
13556#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13557 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13558#endif
13559#if 0 /* NT4SP1 - cmpxchg (AMD). */
13560 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13561#endif
13562#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13563 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13564#endif
13565#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13566 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13567
13568#endif
13569#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13570 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13571
13572#endif
13573#if 0 /* NT4SP1 - frstor [ecx] */
13574 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13575#endif
13576#if 0 /* xxxxxx - All long mode code. */
13577 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13578#endif
13579#if 0 /* rep movsq linux 3.7 64-bit boot. */
13580 || (pOrgCtx->rip == 0x0000000000100241)
13581#endif
13582#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13583 || (pOrgCtx->rip == 0x000000000215e240)
13584#endif
13585#if 0 /* DOS's size-overridden iret to v8086. */
13586 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13587#endif
13588 )
13589 )
13590 {
13591 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13592 RTLogFlags(NULL, "enabled");
13593 fNewNoRem = false;
13594 }
13595 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13596 {
13597 pVCpu->iem.s.fNoRem = fNewNoRem;
13598 if (!fNewNoRem)
13599 {
13600 LogAlways(("Enabling verification mode!\n"));
13601 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13602 }
13603 else
13604 LogAlways(("Disabling verification mode!\n"));
13605 }
13606
13607 /*
13608 * Switch state.
13609 */
13610 if (IEM_VERIFICATION_ENABLED(pVCpu))
13611 {
13612 static CPUMCTX s_DebugCtx; /* Ugly! */
13613
13614 s_DebugCtx = *pOrgCtx;
13615 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13616 }
13617
13618 /*
13619 * See if there is an interrupt pending in TRPM and inject it if we can.
13620 */
13621 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13622 if ( pOrgCtx->eflags.Bits.u1IF
13623 && TRPMHasTrap(pVCpu)
13624 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13625 {
13626 uint8_t u8TrapNo;
13627 TRPMEVENT enmType;
13628 RTGCUINT uErrCode;
13629 RTGCPTR uCr2;
13630 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13631 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13632 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13633 TRPMResetTrap(pVCpu);
13634 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13635 }
13636
13637 /*
13638 * Reset the counters.
13639 */
13640 pVCpu->iem.s.cIOReads = 0;
13641 pVCpu->iem.s.cIOWrites = 0;
13642 pVCpu->iem.s.fIgnoreRaxRdx = false;
13643 pVCpu->iem.s.fOverlappingMovs = false;
13644 pVCpu->iem.s.fProblematicMemory = false;
13645 pVCpu->iem.s.fUndefinedEFlags = 0;
13646
13647 if (IEM_VERIFICATION_ENABLED(pVCpu))
13648 {
13649 /*
13650 * Free all verification records.
13651 */
13652 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13653 pVCpu->iem.s.pIemEvtRecHead = NULL;
13654 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
13655 do
13656 {
13657 while (pEvtRec)
13658 {
13659 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13660 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13661 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13662 pEvtRec = pNext;
13663 }
13664 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13665 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13666 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13667 } while (pEvtRec);
13668 }
13669}
13670
13671
13672/**
13673 * Allocate an event record.
13674 * @returns Pointer to a record.
13675 */
13676IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
13677{
13678 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13679 return NULL;
13680
13681 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
13682 if (pEvtRec)
13683 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
13684 else
13685 {
13686 if (!pVCpu->iem.s.ppIemEvtRecNext)
13687 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
13688
13689 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
13690 if (!pEvtRec)
13691 return NULL;
13692 }
13693 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
13694 pEvtRec->pNext = NULL;
13695 return pEvtRec;
13696}
13697
13698
13699/**
13700 * IOMMMIORead notification.
13701 */
13702VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
13703{
13704 PVMCPU pVCpu = VMMGetCpu(pVM);
13705 if (!pVCpu)
13706 return;
13707 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13708 if (!pEvtRec)
13709 return;
13710 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
13711 pEvtRec->u.RamRead.GCPhys = GCPhys;
13712 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
13713 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13714 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13715}
13716
13717
13718/**
13719 * IOMMMIOWrite notification.
13720 */
13721VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
13722{
13723 PVMCPU pVCpu = VMMGetCpu(pVM);
13724 if (!pVCpu)
13725 return;
13726 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13727 if (!pEvtRec)
13728 return;
13729 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
13730 pEvtRec->u.RamWrite.GCPhys = GCPhys;
13731 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
13732 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
13733 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
13734 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
13735 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
13736 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13737 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13738}
13739
13740
13741/**
13742 * IOMIOPortRead notification.
13743 */
13744VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
13745{
13746 PVMCPU pVCpu = VMMGetCpu(pVM);
13747 if (!pVCpu)
13748 return;
13749 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13750 if (!pEvtRec)
13751 return;
13752 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13753 pEvtRec->u.IOPortRead.Port = Port;
13754 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13755 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13756 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13757}
13758
13759/**
13760 * IOMIOPortWrite notification.
13761 */
13762VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13763{
13764 PVMCPU pVCpu = VMMGetCpu(pVM);
13765 if (!pVCpu)
13766 return;
13767 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13768 if (!pEvtRec)
13769 return;
13770 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13771 pEvtRec->u.IOPortWrite.Port = Port;
13772 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13773 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13774 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13775 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13776}
13777
13778
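/**
 * IOMIOPortReadString notification.
 */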
13779VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
13780{
13781 PVMCPU pVCpu = VMMGetCpu(pVM);
13782 if (!pVCpu)
13783 return;
13784 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13785 if (!pEvtRec)
13786 return;
13787 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
13788 pEvtRec->u.IOPortStrRead.Port = Port;
13789 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
13790 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
13791 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13792 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13793}
13794
13795
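/**
 * IOMIOPortWriteString notification.
 */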
13796VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
13797{
13798 PVMCPU pVCpu = VMMGetCpu(pVM);
13799 if (!pVCpu)
13800 return;
13801 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13802 if (!pEvtRec)
13803 return;
13804 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
13805 pEvtRec->u.IOPortStrWrite.Port = Port;
13806 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
13807 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
13808 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13809 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13810}
13811
13812
13813/**
13814 * Fakes and records an I/O port read.
13815 *
13816 * @returns VINF_SUCCESS.
13817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13818 * @param Port The I/O port.
13819 * @param pu32Value Where to store the fake value.
13820 * @param cbValue The size of the access.
13821 */
13822IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13823{
13824 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13825 if (pEvtRec)
13826 {
13827 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13828 pEvtRec->u.IOPortRead.Port = Port;
13829 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13830 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13831 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13832 }
13833 pVCpu->iem.s.cIOReads++;
13834 *pu32Value = 0xcccccccc;
13835 return VINF_SUCCESS;
13836}
13837
13838
13839/**
13840 * Fakes and records an I/O port write.
13841 *
13842 * @returns VINF_SUCCESS.
13843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13844 * @param Port The I/O port.
13845 * @param u32Value The value being written.
13846 * @param cbValue The size of the access.
13847 */
13848IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13849{
13850 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13851 if (pEvtRec)
13852 {
13853 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13854 pEvtRec->u.IOPortWrite.Port = Port;
13855 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13856 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13857 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13858 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13859 }
13860 pVCpu->iem.s.cIOWrites++;
13861 return VINF_SUCCESS;
13862}
13863
13864
13865/**
13866 * Used to add extra details about a stub case.
13867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13868 */
13869IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
13870{
13871 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13872 PVM pVM = pVCpu->CTX_SUFF(pVM);
13874 char szRegs[4096];
13875 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
13876 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
13877 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
13878 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
13879 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
13880 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
13881 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
13882 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
13883 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
13884 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
13885 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
13886 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
13887 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
13888 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
13889 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
13890 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
13891 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
13892 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
13893 " efer=%016VR{efer}\n"
13894 " pat=%016VR{pat}\n"
13895 " sf_mask=%016VR{sf_mask}\n"
13896 "krnl_gs_base=%016VR{krnl_gs_base}\n"
13897 " lstar=%016VR{lstar}\n"
13898 " star=%016VR{star} cstar=%016VR{cstar}\n"
13899 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
13900 );
13901
13902 char szInstr1[256];
13903 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
13904 DBGF_DISAS_FLAGS_DEFAULT_MODE,
13905 szInstr1, sizeof(szInstr1), NULL);
13906 char szInstr2[256];
13907 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
13908 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13909 szInstr2, sizeof(szInstr2), NULL);
13910
13911 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
13912}
13913
13914
13915/**
13916 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
13917 * dump to the assertion info.
13918 *
13919 * @param pEvtRec The record to dump.
13920 */
13921IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
13922{
13923 switch (pEvtRec->enmEvent)
13924 {
13925 case IEMVERIFYEVENT_IOPORT_READ:
13926 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
13927	                            pEvtRec->u.IOPortRead.Port,
13928	                            pEvtRec->u.IOPortRead.cbValue);
13929 break;
13930 case IEMVERIFYEVENT_IOPORT_WRITE:
13931 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
13932 pEvtRec->u.IOPortWrite.Port,
13933 pEvtRec->u.IOPortWrite.cbValue,
13934 pEvtRec->u.IOPortWrite.u32Value);
13935 break;
13936 case IEMVERIFYEVENT_IOPORT_STR_READ:
13937 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
13938	                            pEvtRec->u.IOPortStrRead.Port,
13939	                            pEvtRec->u.IOPortStrRead.cbValue,
13940	                            pEvtRec->u.IOPortStrRead.cTransfers);
13941 break;
13942 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13943 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
13944 pEvtRec->u.IOPortStrWrite.Port,
13945 pEvtRec->u.IOPortStrWrite.cbValue,
13946 pEvtRec->u.IOPortStrWrite.cTransfers);
13947 break;
13948 case IEMVERIFYEVENT_RAM_READ:
13949 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
13950 pEvtRec->u.RamRead.GCPhys,
13951 pEvtRec->u.RamRead.cb);
13952 break;
13953 case IEMVERIFYEVENT_RAM_WRITE:
13954 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
13955 pEvtRec->u.RamWrite.GCPhys,
13956 pEvtRec->u.RamWrite.cb,
13957 (int)pEvtRec->u.RamWrite.cb,
13958 pEvtRec->u.RamWrite.ab);
13959 break;
13960 default:
13961 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
13962 break;
13963 }
13964}
13965
13966
13967/**
13968	 * Raises an assertion on the specified records, showing the given message with
13969 * a record dump attached.
13970 *
13971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13972 * @param pEvtRec1 The first record.
13973 * @param pEvtRec2 The second record.
13974 * @param pszMsg The message explaining why we're asserting.
13975 */
13976IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
13977{
13978 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13979 iemVerifyAssertAddRecordDump(pEvtRec1);
13980 iemVerifyAssertAddRecordDump(pEvtRec2);
13981 iemVerifyAssertMsg2(pVCpu);
13982 RTAssertPanic();
13983}
13984
13985
13986/**
13987 * Raises an assertion on the specified record, showing the given message with
13988 * a record dump attached.
13989 *
13990 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13991 * @param pEvtRec1 The first record.
13992 * @param pszMsg The message explaining why we're asserting.
13993 */
13994IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
13995{
13996 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13997 iemVerifyAssertAddRecordDump(pEvtRec);
13998 iemVerifyAssertMsg2(pVCpu);
13999 RTAssertPanic();
14000}
14001
14002
14003/**
14004 * Verifies a write record.
14005 *
14006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14007 * @param pEvtRec The write record.
14008	 * @param pEvtRec The write record.
14009	 * @param fRem Set if REM was doing the other execution. If clear, it was HM.
14010 */
14011IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
14012{
14013 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
14014 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
14015 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
14016 if ( RT_FAILURE(rc)
14017 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
14018 {
14019 /* fend off ins */
14020 if ( !pVCpu->iem.s.cIOReads
14021 || pEvtRec->u.RamWrite.ab[0] != 0xcc
14022 || ( pEvtRec->u.RamWrite.cb != 1
14023 && pEvtRec->u.RamWrite.cb != 2
14024 && pEvtRec->u.RamWrite.cb != 4) )
14025 {
14026 /* fend off ROMs and MMIO */
14027 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
14028 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
14029 {
14030 /* fend off fxsave */
14031 if (pEvtRec->u.RamWrite.cb != 512)
14032 {
14033 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
14034 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14035 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
14036 RTAssertMsg2Add("%s: %.*Rhxs\n"
14037 "iem: %.*Rhxs\n",
14038 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
14039 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
14040 iemVerifyAssertAddRecordDump(pEvtRec);
14041 iemVerifyAssertMsg2(pVCpu);
14042 RTAssertPanic();
14043 }
14044 }
14045 }
14046 }
14047
14048}
14049
14050/**
14051	 * Performs the post-execution verification checks.
14052 */
14053IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
14054{
14055 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14056 return rcStrictIem;
14057
14058 /*
14059 * Switch back the state.
14060 */
14061 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
14062 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
14063 Assert(pOrgCtx != pDebugCtx);
14064 IEM_GET_CTX(pVCpu) = pOrgCtx;
14065
14066 /*
14067 * Execute the instruction in REM.
14068 */
14069 bool fRem = false;
14070 PVM pVM = pVCpu->CTX_SUFF(pVM);
14072 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
14073#ifdef IEM_VERIFICATION_MODE_FULL_HM
14074 if ( HMIsEnabled(pVM)
14075 && pVCpu->iem.s.cIOReads == 0
14076 && pVCpu->iem.s.cIOWrites == 0
14077 && !pVCpu->iem.s.fProblematicMemory)
14078 {
14079 uint64_t uStartRip = pOrgCtx->rip;
14080 unsigned iLoops = 0;
14081 do
14082 {
14083 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
14084 iLoops++;
14085 } while ( rc == VINF_SUCCESS
14086 || ( rc == VINF_EM_DBG_STEPPED
14087 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14088 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
14089 || ( pOrgCtx->rip != pDebugCtx->rip
14090 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
14091 && iLoops < 8) );
14092 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
14093 rc = VINF_SUCCESS;
14094 }
14095#endif
14096 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
14097 || rc == VINF_IOM_R3_IOPORT_READ
14098 || rc == VINF_IOM_R3_IOPORT_WRITE
14099 || rc == VINF_IOM_R3_MMIO_READ
14100 || rc == VINF_IOM_R3_MMIO_READ_WRITE
14101 || rc == VINF_IOM_R3_MMIO_WRITE
14102 || rc == VINF_CPUM_R3_MSR_READ
14103 || rc == VINF_CPUM_R3_MSR_WRITE
14104 || rc == VINF_EM_RESCHEDULE
14105 )
14106 {
14107 EMRemLock(pVM);
14108 rc = REMR3EmulateInstruction(pVM, pVCpu);
14109 AssertRC(rc);
14110 EMRemUnlock(pVM);
14111 fRem = true;
14112 }
14113
14114# if 1 /* Skip unimplemented instructions for now. */
14115 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14116 {
14117 IEM_GET_CTX(pVCpu) = pOrgCtx;
14118 if (rc == VINF_EM_DBG_STEPPED)
14119 return VINF_SUCCESS;
14120 return rc;
14121 }
14122# endif
14123
14124 /*
14125 * Compare the register states.
14126 */
14127 unsigned cDiffs = 0;
14128 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
14129 {
14130 //Log(("REM and IEM ends up with different registers!\n"));
14131 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
14132
14133# define CHECK_FIELD(a_Field) \
14134 do \
14135 { \
14136 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14137 { \
14138 switch (sizeof(pOrgCtx->a_Field)) \
14139 { \
14140 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14141 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14142 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14143 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14144 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14145 } \
14146 cDiffs++; \
14147 } \
14148 } while (0)
14149# define CHECK_XSTATE_FIELD(a_Field) \
14150 do \
14151 { \
14152 if (pOrgXState->a_Field != pDebugXState->a_Field) \
14153 { \
14154 switch (sizeof(pOrgXState->a_Field)) \
14155 { \
14156 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14157 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14158 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14159 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14160 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14161 } \
14162 cDiffs++; \
14163 } \
14164 } while (0)
14165
14166# define CHECK_BIT_FIELD(a_Field) \
14167 do \
14168 { \
14169 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14170 { \
14171 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
14172 cDiffs++; \
14173 } \
14174 } while (0)
14175
14176# define CHECK_SEL(a_Sel) \
14177 do \
14178 { \
14179 CHECK_FIELD(a_Sel.Sel); \
14180 CHECK_FIELD(a_Sel.Attr.u); \
14181 CHECK_FIELD(a_Sel.u64Base); \
14182 CHECK_FIELD(a_Sel.u32Limit); \
14183 CHECK_FIELD(a_Sel.fFlags); \
14184 } while (0)
14185
14186 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
14187 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
14188
14189#if 1 /* The recompiler doesn't update these the intel way. */
14190 if (fRem)
14191 {
14192 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
14193 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
14194 pOrgXState->x87.CS = pDebugXState->x87.CS;
14195 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
14196 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
14197 pOrgXState->x87.DS = pDebugXState->x87.DS;
14198 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
14199 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
14200 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
14201 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
14202 }
14203#endif
14204 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
14205 {
14206 RTAssertMsg2Weak(" the FPU state differs\n");
14207 cDiffs++;
14208 CHECK_XSTATE_FIELD(x87.FCW);
14209 CHECK_XSTATE_FIELD(x87.FSW);
14210 CHECK_XSTATE_FIELD(x87.FTW);
14211 CHECK_XSTATE_FIELD(x87.FOP);
14212 CHECK_XSTATE_FIELD(x87.FPUIP);
14213 CHECK_XSTATE_FIELD(x87.CS);
14214 CHECK_XSTATE_FIELD(x87.Rsrvd1);
14215 CHECK_XSTATE_FIELD(x87.FPUDP);
14216 CHECK_XSTATE_FIELD(x87.DS);
14217 CHECK_XSTATE_FIELD(x87.Rsrvd2);
14218 CHECK_XSTATE_FIELD(x87.MXCSR);
14219 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
14220 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
14221 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
14222 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
14223 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
14224 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
14225 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
14226 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
14227 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
14228 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
14229 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
14230 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
14231 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
14232 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
14233 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
14234 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
14235 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
14236 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
14237 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
14238 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
14239 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
14240 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
14241 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
14242 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
14243 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
14244 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
14245 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
14246 }
14247 CHECK_FIELD(rip);
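        /* Compare RFLAGS, masking out any flag bits the instruction left undefined. */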
14248 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
14249 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
14250 {
14251 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
14252 CHECK_BIT_FIELD(rflags.Bits.u1CF);
14253 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
14254 CHECK_BIT_FIELD(rflags.Bits.u1PF);
14255 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
14256 CHECK_BIT_FIELD(rflags.Bits.u1AF);
14257 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
14258 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
14259 CHECK_BIT_FIELD(rflags.Bits.u1SF);
14260 CHECK_BIT_FIELD(rflags.Bits.u1TF);
14261 CHECK_BIT_FIELD(rflags.Bits.u1IF);
14262 CHECK_BIT_FIELD(rflags.Bits.u1DF);
14263 CHECK_BIT_FIELD(rflags.Bits.u1OF);
14264 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
14265 CHECK_BIT_FIELD(rflags.Bits.u1NT);
14266 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
14267	            if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
14268 CHECK_BIT_FIELD(rflags.Bits.u1RF);
14269 CHECK_BIT_FIELD(rflags.Bits.u1VM);
14270 CHECK_BIT_FIELD(rflags.Bits.u1AC);
14271 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
14272 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
14273 CHECK_BIT_FIELD(rflags.Bits.u1ID);
14274 }
14275
14276 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
14277 CHECK_FIELD(rax);
14278 CHECK_FIELD(rcx);
14279 if (!pVCpu->iem.s.fIgnoreRaxRdx)
14280 CHECK_FIELD(rdx);
14281 CHECK_FIELD(rbx);
14282 CHECK_FIELD(rsp);
14283 CHECK_FIELD(rbp);
14284 CHECK_FIELD(rsi);
14285 CHECK_FIELD(rdi);
14286 CHECK_FIELD(r8);
14287 CHECK_FIELD(r9);
14288 CHECK_FIELD(r10);
14289 CHECK_FIELD(r11);
14290 CHECK_FIELD(r12);
14291 CHECK_FIELD(r13);
14292 CHECK_SEL(cs);
14293 CHECK_SEL(ss);
14294 CHECK_SEL(ds);
14295 CHECK_SEL(es);
14296 CHECK_SEL(fs);
14297 CHECK_SEL(gs);
14298 CHECK_FIELD(cr0);
14299
14300	        /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
14301	           the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
14302	        /* Kludge #2: CR2 differs slightly on cross page boundary faults; we report the last address of the access
14303	           while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
14304 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14305 {
14306 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14307 { /* ignore */ }
14308 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14309 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14310 && fRem)
14311 { /* ignore */ }
14312 else
14313 CHECK_FIELD(cr2);
14314 }
14315 CHECK_FIELD(cr3);
14316 CHECK_FIELD(cr4);
14317 CHECK_FIELD(dr[0]);
14318 CHECK_FIELD(dr[1]);
14319 CHECK_FIELD(dr[2]);
14320 CHECK_FIELD(dr[3]);
14321 CHECK_FIELD(dr[6]);
14322 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14323 CHECK_FIELD(dr[7]);
14324 CHECK_FIELD(gdtr.cbGdt);
14325 CHECK_FIELD(gdtr.pGdt);
14326 CHECK_FIELD(idtr.cbIdt);
14327 CHECK_FIELD(idtr.pIdt);
14328 CHECK_SEL(ldtr);
14329 CHECK_SEL(tr);
14330 CHECK_FIELD(SysEnter.cs);
14331 CHECK_FIELD(SysEnter.eip);
14332 CHECK_FIELD(SysEnter.esp);
14333 CHECK_FIELD(msrEFER);
14334 CHECK_FIELD(msrSTAR);
14335 CHECK_FIELD(msrPAT);
14336 CHECK_FIELD(msrLSTAR);
14337 CHECK_FIELD(msrCSTAR);
14338 CHECK_FIELD(msrSFMASK);
14339 CHECK_FIELD(msrKERNELGSBASE);
14340
14341 if (cDiffs != 0)
14342 {
14343 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14344 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14345 RTAssertPanic();
14346 static bool volatile s_fEnterDebugger = true;
14347 if (s_fEnterDebugger)
14348 DBGFSTOP(pVM);
14349
14350# if 1 /* Ignore unimplemented instructions for now. */
14351 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14352 rcStrictIem = VINF_SUCCESS;
14353# endif
14354 }
14355# undef CHECK_FIELD
14356# undef CHECK_BIT_FIELD
14357 }
14358
14359 /*
14360 * If the register state compared fine, check the verification event
14361 * records.
14362 */
14363 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14364 {
14365 /*
14366	         * Compare verification event records.
14367 * - I/O port accesses should be a 1:1 match.
14368 */
14369 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14370 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14371 while (pIemRec && pOtherRec)
14372 {
14373	            /* Since we might miss RAM writes and reads on the other side, skip extra
14374	               IEM RAM records here, verifying any writes against guest memory. */
14375 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14376 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14377 && pIemRec->pNext)
14378 {
14379 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14380 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14381 pIemRec = pIemRec->pNext;
14382 }
14383
14384 /* Do the compare. */
14385 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14386 {
14387 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14388 break;
14389 }
14390 bool fEquals;
14391 switch (pIemRec->enmEvent)
14392 {
14393 case IEMVERIFYEVENT_IOPORT_READ:
14394 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14395 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14396 break;
14397 case IEMVERIFYEVENT_IOPORT_WRITE:
14398 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14399 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14400 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14401 break;
14402 case IEMVERIFYEVENT_IOPORT_STR_READ:
14403 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14404 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14405 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14406 break;
14407 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14408 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14409 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14410 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14411 break;
14412 case IEMVERIFYEVENT_RAM_READ:
14413 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14414 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14415 break;
14416 case IEMVERIFYEVENT_RAM_WRITE:
14417 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14418 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14419 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14420 break;
14421 default:
14422 fEquals = false;
14423 break;
14424 }
14425 if (!fEquals)
14426 {
14427 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14428 break;
14429 }
14430
14431 /* advance */
14432 pIemRec = pIemRec->pNext;
14433 pOtherRec = pOtherRec->pNext;
14434 }
14435
14436 /* Ignore extra writes and reads. */
14437 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14438 {
14439 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14440 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14441 pIemRec = pIemRec->pNext;
14442 }
14443 if (pIemRec != NULL)
14444 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14445 else if (pOtherRec != NULL)
14446 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14447 }
14448 IEM_GET_CTX(pVCpu) = pOrgCtx;
14449
14450 return rcStrictIem;
14451}
14452
14453#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14454
14455/* stubs */
14456IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14457{
14458 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14459 return VERR_INTERNAL_ERROR;
14460}
14461
14462IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14463{
14464 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14465 return VERR_INTERNAL_ERROR;
14466}
14467
14468#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14469
14470
14471#ifdef LOG_ENABLED
14472/**
14473 * Logs the current instruction.
14474 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14475 * @param pCtx The current CPU context.
14476 * @param fSameCtx Set if we have the same context information as the VMM,
14477 * clear if we may have already executed an instruction in
14478 * our debug context. When clear, we assume IEMCPU holds
14479 * valid CPU mode info.
14480 */
14481IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14482{
14483# ifdef IN_RING3
14484 if (LogIs2Enabled())
14485 {
14486 char szInstr[256];
14487 uint32_t cbInstr = 0;
14488 if (fSameCtx)
14489 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14490 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14491 szInstr, sizeof(szInstr), &cbInstr);
14492 else
14493 {
14494 uint32_t fFlags = 0;
14495 switch (pVCpu->iem.s.enmCpuMode)
14496 {
14497 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14498 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14499 case IEMMODE_16BIT:
14500 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14501 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14502 else
14503 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14504 break;
14505 }
14506 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14507 szInstr, sizeof(szInstr), &cbInstr);
14508 }
14509
14510 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14511 Log2(("****\n"
14512 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14513 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14514 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14515 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14516 " %s\n"
14517 ,
14518 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14519 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14520 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14521 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14522 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14523 szInstr));
14524
14525 if (LogIs3Enabled())
14526 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14527 }
14528 else
14529# endif
14530 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14531 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14532 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14533}
14534#endif
14535
14536
14537/**
14538	 * Makes status code adjustments (pass up from I/O and access handlers)
14539 * as well as maintaining statistics.
14540 *
14541 * @returns Strict VBox status code to pass up.
14542 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14543 * @param rcStrict The status from executing an instruction.
14544 */
14545DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14546{
14547 if (rcStrict != VINF_SUCCESS)
14548 {
14549 if (RT_SUCCESS(rcStrict))
14550 {
14551 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14552 || rcStrict == VINF_IOM_R3_IOPORT_READ
14553 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14554 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14555 || rcStrict == VINF_IOM_R3_MMIO_READ
14556 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14557 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14558 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14559 || rcStrict == VINF_CPUM_R3_MSR_READ
14560 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14561 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14562 || rcStrict == VINF_EM_RAW_TO_R3
14563 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14564 /* raw-mode / virt handlers only: */
14565 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14566 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14567 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14568 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14569 || rcStrict == VINF_SELM_SYNC_GDT
14570 || rcStrict == VINF_CSAM_PENDING_ACTION
14571 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14572 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14573/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
14574 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
14575 if (rcPassUp == VINF_SUCCESS)
14576 pVCpu->iem.s.cRetInfStatuses++;
14577 else if ( rcPassUp < VINF_EM_FIRST
14578 || rcPassUp > VINF_EM_LAST
14579 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14580 {
14581 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14582 pVCpu->iem.s.cRetPassUpStatus++;
14583 rcStrict = rcPassUp;
14584 }
14585 else
14586 {
14587 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14588 pVCpu->iem.s.cRetInfStatuses++;
14589 }
14590 }
14591 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14592 pVCpu->iem.s.cRetAspectNotImplemented++;
14593 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14594 pVCpu->iem.s.cRetInstrNotImplemented++;
14595#ifdef IEM_VERIFICATION_MODE_FULL
14596 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14597 rcStrict = VINF_SUCCESS;
14598#endif
14599 else
14600 pVCpu->iem.s.cRetErrStatuses++;
14601 }
14602 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14603 {
14604 pVCpu->iem.s.cRetPassUpStatus++;
14605 rcStrict = pVCpu->iem.s.rcPassUp;
14606 }
14607
14608 return rcStrict;
14609}
14610
14611
14612/**
14613 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14614 * IEMExecOneWithPrefetchedByPC.
14615 *
14616 * Similar code is found in IEMExecLots.
14617 *
14618 * @return Strict VBox status code.
14619	 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14621 * @param fExecuteInhibit If set, execute the instruction following CLI,
14622 * POP SS and MOV SS,GR.
14623 */
14624DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14625{
14626#ifdef IEM_WITH_SETJMP
14627 VBOXSTRICTRC rcStrict;
14628 jmp_buf JmpBuf;
14629 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14630 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14631 if ((rcStrict = setjmp(JmpBuf)) == 0)
14632 {
14633 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14634 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14635 }
14636 else
14637 pVCpu->iem.s.cLongJumps++;
14638 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14639#else
14640 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14641 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14642#endif
14643 if (rcStrict == VINF_SUCCESS)
14644 pVCpu->iem.s.cInstructions++;
14645 if (pVCpu->iem.s.cActiveMappings > 0)
14646 {
14647 Assert(rcStrict != VINF_SUCCESS);
14648 iemMemRollback(pVCpu);
14649 }
14650//#ifdef DEBUG
14651// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14652//#endif
14653
14654 /* Execute the next instruction as well if a cli, pop ss or
14655 mov ss, Gr has just completed successfully. */
14656 if ( fExecuteInhibit
14657 && rcStrict == VINF_SUCCESS
14658 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14659 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
14660 {
14661 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14662 if (rcStrict == VINF_SUCCESS)
14663 {
14664#ifdef LOG_ENABLED
14665 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14666#endif
14667#ifdef IEM_WITH_SETJMP
14668 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14669 if ((rcStrict = setjmp(JmpBuf)) == 0)
14670 {
14671 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14672 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14673 }
14674 else
14675 pVCpu->iem.s.cLongJumps++;
14676 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14677#else
14678 IEM_OPCODE_GET_NEXT_U8(&b);
14679 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14680#endif
14681 if (rcStrict == VINF_SUCCESS)
14682 pVCpu->iem.s.cInstructions++;
14683 if (pVCpu->iem.s.cActiveMappings > 0)
14684 {
14685 Assert(rcStrict != VINF_SUCCESS);
14686 iemMemRollback(pVCpu);
14687 }
14688 }
14689 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14690 }
14691
14692 /*
14693 * Return value fiddling, statistics and sanity assertions.
14694 */
14695 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14696
14697 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14698 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14699#if defined(IEM_VERIFICATION_MODE_FULL)
14700 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14701 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14702 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14703 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14704#endif
14705 return rcStrict;
14706}
14707
14708
14709#ifdef IN_RC
14710/**
14711 * Re-enters raw-mode or ensure we return to ring-3.
14712 *
14713 * @returns rcStrict, maybe modified.
14714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14715 * @param pCtx The current CPU context.
14716	 * @param rcStrict The status code returned by the interpreter.
14717 */
14718DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
14719{
14720 if ( !pVCpu->iem.s.fInPatchCode
14721 && ( rcStrict == VINF_SUCCESS
14722 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14723 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14724 {
14725 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14726 CPUMRawEnter(pVCpu);
14727 else
14728 {
14729 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14730 rcStrict = VINF_EM_RESCHEDULE;
14731 }
14732 }
14733 return rcStrict;
14734}
14735#endif
14736
14737
14738/**
14739 * Execute one instruction.
14740 *
14741 * @return Strict VBox status code.
14742 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14743 */
14744VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14745{
14746#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14747 if (++pVCpu->iem.s.cVerifyDepth == 1)
14748 iemExecVerificationModeSetup(pVCpu);
14749#endif
14750#ifdef LOG_ENABLED
14751 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14752 iemLogCurInstr(pVCpu, pCtx, true);
14753#endif
14754
14755 /*
14756 * Do the decoding and emulation.
14757 */
14758 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14759 if (rcStrict == VINF_SUCCESS)
14760 rcStrict = iemExecOneInner(pVCpu, true);
14761
14762#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14763 /*
14764 * Assert some sanity.
14765 */
14766 if (pVCpu->iem.s.cVerifyDepth == 1)
14767 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14768 pVCpu->iem.s.cVerifyDepth--;
14769#endif
14770#ifdef IN_RC
14771 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14772#endif
14773 if (rcStrict != VINF_SUCCESS)
14774 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14775 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14776 return rcStrict;
14777}
14778
14779
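/**
 * Executes one instruction and optionally reports the number of guest memory
 * bytes written by it.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pCtxCore    The context core structure; must match the current CPU context.
 * @param   pcbWritten  Where to return the number of bytes written.  Optional.
 */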
14780VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14781{
14782 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14783 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14784
14785 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14786 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14787 if (rcStrict == VINF_SUCCESS)
14788 {
14789 rcStrict = iemExecOneInner(pVCpu, true);
14790 if (pcbWritten)
14791 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14792 }
14793
14794#ifdef IN_RC
14795 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14796#endif
14797 return rcStrict;
14798}
14799
14800
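/**
 * Executes one instruction, reusing the caller supplied opcode bytes when they
 * were fetched at the current RIP (saves refetching them from guest memory).
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   pCtxCore        The context core structure; must match the current CPU context.
 * @param   OpcodeBytesPC   The guest RIP the opcode bytes were fetched at.
 * @param   pvOpcodeBytes   The prefetched opcode bytes.
 * @param   cbOpcodeBytes   Number of prefetched bytes.
 */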
14801VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14802 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14803{
14804 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14805 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14806
14807 VBOXSTRICTRC rcStrict;
14808 if ( cbOpcodeBytes
14809 && pCtx->rip == OpcodeBytesPC)
14810 {
14811 iemInitDecoder(pVCpu, false);
14812#ifdef IEM_WITH_CODE_TLB
14813 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14814 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14815 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14816 pVCpu->iem.s.offCurInstrStart = 0;
14817 pVCpu->iem.s.offInstrNextByte = 0;
14818#else
14819 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14820 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14821#endif
14822 rcStrict = VINF_SUCCESS;
14823 }
14824 else
14825 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14826 if (rcStrict == VINF_SUCCESS)
14827 {
14828 rcStrict = iemExecOneInner(pVCpu, true);
14829 }
14830
14831#ifdef IN_RC
14832 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14833#endif
14834 return rcStrict;
14835}
14836
14837
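/**
 * Executes one instruction with access handlers bypassed and without chasing
 * the instruction following CLI, POP SS or MOV SS, reporting the bytes written.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pCtxCore    The context core structure; must match the current CPU context.
 * @param   pcbWritten  Where to return the number of bytes written.  Optional.
 */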
14838VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14839{
14840 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14841 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14842
14843 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14844 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14845 if (rcStrict == VINF_SUCCESS)
14846 {
14847 rcStrict = iemExecOneInner(pVCpu, false);
14848 if (pcbWritten)
14849 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14850 }
14851
14852#ifdef IN_RC
14853 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14854#endif
14855 return rcStrict;
14856}
14857
14858
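/**
 * Like IEMExecOneWithPrefetchedByPC, but with access handlers bypassed and
 * without executing the instruction following CLI, POP SS or MOV SS.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   pCtxCore        The context core structure; must match the current CPU context.
 * @param   OpcodeBytesPC   The guest RIP the opcode bytes were fetched at.
 * @param   pvOpcodeBytes   The prefetched opcode bytes.
 * @param   cbOpcodeBytes   Number of prefetched bytes.
 */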
14859VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14860 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14861{
14862 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14863 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14864
14865 VBOXSTRICTRC rcStrict;
14866 if ( cbOpcodeBytes
14867 && pCtx->rip == OpcodeBytesPC)
14868 {
14869 iemInitDecoder(pVCpu, true);
14870#ifdef IEM_WITH_CODE_TLB
14871 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14872 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14873 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14874 pVCpu->iem.s.offCurInstrStart = 0;
14875 pVCpu->iem.s.offInstrNextByte = 0;
14876#else
14877 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14878 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14879#endif
14880 rcStrict = VINF_SUCCESS;
14881 }
14882 else
14883 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14884 if (rcStrict == VINF_SUCCESS)
14885 rcStrict = iemExecOneInner(pVCpu, false);
14886
14887#ifdef IN_RC
14888 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14889#endif
14890 return rcStrict;
14891}
14892
14893
14894/**
14895	 * May come in handy for debugging DISGetParamSize.
14896 *
14897 * @returns Strict VBox status code.
14898 * @param pVCpu The cross context virtual CPU structure of the
14899 * calling EMT.
14900 * @param pCtxCore The context core structure.
14901 * @param OpcodeBytesPC The PC of the opcode bytes.
14902	 * @param pvOpcodeBytes Prefetched opcode bytes.
14903 * @param cbOpcodeBytes Number of prefetched bytes.
14904 * @param pcbWritten Where to return the number of bytes written.
14905 * Optional.
14906 */
14907VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14908 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14909 uint32_t *pcbWritten)
14910{
14911 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14912 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14913
14914 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14915 VBOXSTRICTRC rcStrict;
14916 if ( cbOpcodeBytes
14917 && pCtx->rip == OpcodeBytesPC)
14918 {
14919 iemInitDecoder(pVCpu, true);
14920#ifdef IEM_WITH_CODE_TLB
14921 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14922 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14923 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14924 pVCpu->iem.s.offCurInstrStart = 0;
14925 pVCpu->iem.s.offInstrNextByte = 0;
14926#else
14927 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14928 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14929#endif
14930 rcStrict = VINF_SUCCESS;
14931 }
14932 else
14933 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14934 if (rcStrict == VINF_SUCCESS)
14935 {
14936 rcStrict = iemExecOneInner(pVCpu, false);
14937 if (pcbWritten)
14938 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14939 }
14940
14941#ifdef IN_RC
14942 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14943#endif
14944 return rcStrict;
14945}
14946
14947
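/**
 * Executes instructions in a loop (up to 4096 per call in the normal build),
 * stopping early on pending forced actions or any non-VINF_SUCCESS status.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   pcInstructions  Where to return the number of instructions executed.  Optional.
 */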
14948VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14949{
14950 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14951
14952#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14953 /*
14954 * See if there is an interrupt pending in TRPM, inject it if we can.
14955 */
14956 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14957# ifdef IEM_VERIFICATION_MODE_FULL
14958 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14959# endif
14960 if ( pCtx->eflags.Bits.u1IF
14961 && TRPMHasTrap(pVCpu)
14962 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14963 {
14964 uint8_t u8TrapNo;
14965 TRPMEVENT enmType;
14966 RTGCUINT uErrCode;
14967 RTGCPTR uCr2;
14968 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14969 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14970 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14971 TRPMResetTrap(pVCpu);
14972 }
14973
14974 /*
14975 * Log the state.
14976 */
14977# ifdef LOG_ENABLED
14978 iemLogCurInstr(pVCpu, pCtx, true);
14979# endif
14980
14981 /*
14982 * Do the decoding and emulation.
14983 */
14984 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14985 if (rcStrict == VINF_SUCCESS)
14986 rcStrict = iemExecOneInner(pVCpu, true);
14987
14988 /*
14989 * Assert some sanity.
14990 */
14991 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14992
14993 /*
14994 * Log and return.
14995 */
14996 if (rcStrict != VINF_SUCCESS)
14997 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14998 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14999 if (pcInstructions)
15000 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15001 return rcStrict;
15002
15003#else /* Not verification mode */
15004
15005 /*
15006 * See if there is an interrupt pending in TRPM, inject it if we can.
15007 */
15008 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15009# ifdef IEM_VERIFICATION_MODE_FULL
15010 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15011# endif
15012 if ( pCtx->eflags.Bits.u1IF
15013 && TRPMHasTrap(pVCpu)
15014 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15015 {
15016 uint8_t u8TrapNo;
15017 TRPMEVENT enmType;
15018 RTGCUINT uErrCode;
15019 RTGCPTR uCr2;
15020 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15021 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15022 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15023 TRPMResetTrap(pVCpu);
15024 }
15025
15026 /*
15027 * Initial decoder init w/ prefetch, then setup setjmp.
15028 */
15029 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15030 if (rcStrict == VINF_SUCCESS)
15031 {
15032# ifdef IEM_WITH_SETJMP
15033 jmp_buf JmpBuf;
15034 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
15035 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15036 pVCpu->iem.s.cActiveMappings = 0;
15037 if ((rcStrict = setjmp(JmpBuf)) == 0)
15038# endif
15039 {
15040 /*
15041 * The run loop. We limit ourselves to 4096 instructions right now.
15042 */
15043 PVM pVM = pVCpu->CTX_SUFF(pVM);
15044 uint32_t cInstr = 4096;
15045 for (;;)
15046 {
15047 /*
15048 * Log the state.
15049 */
15050# ifdef LOG_ENABLED
15051 iemLogCurInstr(pVCpu, pCtx, true);
15052# endif
15053
15054 /*
15055 * Do the decoding and emulation.
15056 */
15057 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15058 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15059 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15060 {
15061 Assert(pVCpu->iem.s.cActiveMappings == 0);
15062 pVCpu->iem.s.cInstructions++;
15063 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
15064 {
15065 uint32_t fCpu = pVCpu->fLocalForcedActions
15066 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
15067 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
15068 | VMCPU_FF_TLB_FLUSH
15069# ifdef VBOX_WITH_RAW_MODE
15070 | VMCPU_FF_TRPM_SYNC_IDT
15071 | VMCPU_FF_SELM_SYNC_TSS
15072 | VMCPU_FF_SELM_SYNC_GDT
15073 | VMCPU_FF_SELM_SYNC_LDT
15074# endif
15075 | VMCPU_FF_INHIBIT_INTERRUPTS
15076 | VMCPU_FF_BLOCK_NMIS
15077 | VMCPU_FF_UNHALT ));
15078
15079 if (RT_LIKELY( ( !fCpu
15080 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
15081 && !pCtx->rflags.Bits.u1IF) )
15082 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
15083 {
15084 if (cInstr-- > 0)
15085 {
15086 Assert(pVCpu->iem.s.cActiveMappings == 0);
15087 iemReInitDecoder(pVCpu);
15088 continue;
15089 }
15090 }
15091 }
15092 Assert(pVCpu->iem.s.cActiveMappings == 0);
15093 }
15094 else if (pVCpu->iem.s.cActiveMappings > 0)
15095 iemMemRollback(pVCpu);
15096 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15097 break;
15098 }
15099 }
15100# ifdef IEM_WITH_SETJMP
15101 else
15102 {
15103 if (pVCpu->iem.s.cActiveMappings > 0)
15104 iemMemRollback(pVCpu);
15105 pVCpu->iem.s.cLongJumps++;
15106 }
15107 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15108# endif
15109
15110 /*
15111 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15112 */
15113 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15114 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15115# if defined(IEM_VERIFICATION_MODE_FULL)
15116 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15117 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15118 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15119 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15120# endif
15121 }
15122
15123 /*
15124 * Maybe re-enter raw-mode and log.
15125 */
15126# ifdef IN_RC
15127 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15128# endif
15129 if (rcStrict != VINF_SUCCESS)
15130 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15131 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15132 if (pcInstructions)
15133 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15134 return rcStrict;
15135#endif /* Not verification mode */
15136}
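
#if 0 /* Illustrative sketch only, not part of the build. */
/*
 * Hypothetical caller sketch showing the intended use of IEMExecLots above: run a
 * batch of guest instructions and report how many were retired.  The helper name
 * and the log message are illustrative assumptions; only the IEMExecLots signature
 * is taken from this file.
 */
static VBOXSTRICTRC myExampleRunGuestBatch(PVMCPU pVCpu)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
    LogFlow(("myExampleRunGuestBatch: retired %u instruction(s), rc=%Rrc\n",
             cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif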
15137
15138
15139
15140/**
15141 * Injects a trap, fault, abort, software interrupt or external interrupt.
15142 *
15143 * The parameter list matches TRPMQueryTrapAll pretty closely.
15144 *
15145 * @returns Strict VBox status code.
15146 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15147 * @param u8TrapNo The trap number.
15148 * @param enmType The event type: trap, fault or abort, software
15149 * interrupt, or hardware (external) interrupt.
15150 * @param uErrCode The error code if applicable.
15151 * @param uCr2 The CR2 value if applicable.
15152 * @param cbInstr The instruction length (only relevant for
15153 * software interrupts).
15154 */
15155VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
15156 uint8_t cbInstr)
15157{
15158 iemInitDecoder(pVCpu, false);
15159#ifdef DBGFTRACE_ENABLED
15160 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
15161 u8TrapNo, enmType, uErrCode, uCr2);
15162#endif
15163
15164 uint32_t fFlags;
15165 switch (enmType)
15166 {
15167 case TRPM_HARDWARE_INT:
15168 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
15169 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
15170 uErrCode = uCr2 = 0;
15171 break;
15172
15173 case TRPM_SOFTWARE_INT:
15174 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
15175 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
15176 uErrCode = uCr2 = 0;
15177 break;
15178
15179 case TRPM_TRAP:
15180 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
15181 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
15182 if (u8TrapNo == X86_XCPT_PF)
15183 fFlags |= IEM_XCPT_FLAGS_CR2;
15184 switch (u8TrapNo)
15185 {
15186 case X86_XCPT_DF:
15187 case X86_XCPT_TS:
15188 case X86_XCPT_NP:
15189 case X86_XCPT_SS:
15190 case X86_XCPT_PF:
15191 case X86_XCPT_AC:
15192 fFlags |= IEM_XCPT_FLAGS_ERR;
15193 break;
15194
15195 case X86_XCPT_NMI:
15196 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
15197 break;
15198 }
15199 break;
15200
15201 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15202 }
15203
15204 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15205}
15206
15207
15208/**
15209 * Injects the active TRPM event.
15210 *
15211 * @returns Strict VBox status code.
15212 * @param pVCpu The cross context virtual CPU structure.
15213 */
15214VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
15215{
15216#ifndef IEM_IMPLEMENTS_TASKSWITCH
15217 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15218#else
15219 uint8_t u8TrapNo;
15220 TRPMEVENT enmType;
15221 RTGCUINT uErrCode;
15222 RTGCUINTPTR uCr2;
15223 uint8_t cbInstr;
15224 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
15225 if (RT_FAILURE(rc))
15226 return rc;
15227
15228 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15229
15230 /** @todo Are there any other codes that imply the event was successfully
15231 * delivered to the guest? See @bugref{6607}. */
15232 if ( rcStrict == VINF_SUCCESS
15233 || rcStrict == VINF_IEM_RAISED_XCPT)
15234 {
15235 TRPMResetTrap(pVCpu);
15236 }
15237 return rcStrict;
15238#endif
15239}
15240
15241
15242VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15243{
15244 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15245 return VERR_NOT_IMPLEMENTED;
15246}
15247
15248
15249VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15250{
15251 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15252 return VERR_NOT_IMPLEMENTED;
15253}
15254
15255
15256#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15257/**
15258 * Executes an IRET instruction with the default operand size.
15259 *
15260 * This is for PATM.
15261 *
15262 * @returns VBox status code.
15263 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15264 * @param pCtxCore The register frame.
15265 */
15266VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
15267{
15268 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15269
15270 iemCtxCoreToCtx(pCtx, pCtxCore);
15271 iemInitDecoder(pVCpu);
15272 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15273 if (rcStrict == VINF_SUCCESS)
15274 iemCtxToCtxCore(pCtxCore, pCtx);
15275 else
15276 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15277 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15278 return rcStrict;
15279}
15280#endif
15281
15282
15283/**
15284 * Macro used by the IEMExec* methods to check the given instruction length.
15285 *
15286 * Will return on failure!
15287 *
15288 * @param a_cbInstr The given instruction length.
15289 * @param a_cbMin The minimum length.
15290 */
15291#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15292 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15293 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
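
#if 0 /* Standalone illustration only, not part of the build. */
/*
 * A minimal self-contained sketch of the unsigned wrap-around trick used by the
 * macro above: (x - min) <= (15 - min) holds exactly when min <= x <= 15, because
 * an x below min wraps around to a huge unsigned value.  15 is the architectural
 * maximum x86 instruction length; the function name and test values below are
 * illustrative assumptions.
 */
# include <stdio.h>

static int myIsValidInstrLen(unsigned cbInstr, unsigned cbMin)
{
    return cbInstr - cbMin <= 15u - cbMin; /* one compare covers both bounds */
}

int main(void)
{
    printf("%d %d %d\n",
           myIsValidInstrLen(0, 1),   /* 0: too short, wraps around */
           myIsValidInstrLen(3, 1),   /* 1: within [1, 15] */
           myIsValidInstrLen(16, 1)); /* 0: exceeds the 15 byte limit */
    return 0;
}
#endif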
15294
15295
15296/**
15297 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15298 *
15299 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15300 *
15301 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
15302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15303 * @param rcStrict The status code to fiddle.
15304 */
15305DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15306{
15307 iemUninitExec(pVCpu);
15308#ifdef IN_RC
15309 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15310 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15311#else
15312 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15313#endif
15314}
15315
15316
15317/**
15318 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15319 *
15320 * This API ASSUMES that the caller has already verified that the guest code is
15321 * allowed to access the I/O port. (The I/O port is in the DX register in the
15322 * guest state.)
15323 *
15324 * @returns Strict VBox status code.
15325 * @param pVCpu The cross context virtual CPU structure.
15326 * @param cbValue The size of the I/O port access (1, 2, or 4).
15327 * @param enmAddrMode The addressing mode.
15328 * @param fRepPrefix Indicates whether a repeat prefix is used
15329 * (doesn't matter which for this instruction).
15330 * @param cbInstr The instruction length in bytes.
15331 * @param iEffSeg The effective segment register number.
15332 * @param fIoChecked Whether the access to the I/O port has been
15333 * checked or not. It's typically checked in the
15334 * HM scenario.
15335 */
15336VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15337 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15338{
15339 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15340 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15341
15342 /*
15343 * State init.
15344 */
15345 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15346
15347 /*
15348 * Switch orgy for getting to the right handler.
15349 */
15350 VBOXSTRICTRC rcStrict;
15351 if (fRepPrefix)
15352 {
15353 switch (enmAddrMode)
15354 {
15355 case IEMMODE_16BIT:
15356 switch (cbValue)
15357 {
15358 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15359 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15360 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15361 default:
15362 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15363 }
15364 break;
15365
15366 case IEMMODE_32BIT:
15367 switch (cbValue)
15368 {
15369 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15370 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15371 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15372 default:
15373 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15374 }
15375 break;
15376
15377 case IEMMODE_64BIT:
15378 switch (cbValue)
15379 {
15380 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15381 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15382 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15383 default:
15384 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15385 }
15386 break;
15387
15388 default:
15389 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15390 }
15391 }
15392 else
15393 {
15394 switch (enmAddrMode)
15395 {
15396 case IEMMODE_16BIT:
15397 switch (cbValue)
15398 {
15399 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15400 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15401 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15402 default:
15403 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15404 }
15405 break;
15406
15407 case IEMMODE_32BIT:
15408 switch (cbValue)
15409 {
15410 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15411 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15412 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15413 default:
15414 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15415 }
15416 break;
15417
15418 case IEMMODE_64BIT:
15419 switch (cbValue)
15420 {
15421 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15422 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15423 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15424 default:
15425 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15426 }
15427 break;
15428
15429 default:
15430 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15431 }
15432 }
15433
15434 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15435}
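
#if 0 /* Illustrative sketch only, not part of the build. */
/*
 * Hypothetical caller sketch for IEMExecStringIoWrite above: emulate a "rep outsb"
 * with 32-bit addressing from the DS segment after the caller has already checked
 * the I/O port permissions.  The 2 byte instruction length (REP prefix + opcode)
 * and the helper name are illustrative assumptions.
 */
static VBOXSTRICTRC myExampleRepOutsb(PVMCPU pVCpu)
{
    return IEMExecStringIoWrite(pVCpu,
                                1 /*cbValue*/,
                                IEMMODE_32BIT,
                                true /*fRepPrefix*/,
                                2 /*cbInstr*/,
                                X86_SREG_DS,
                                true /*fIoChecked*/);
}
#endif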
15436
15437
15438/**
15439 * Interface for HM and EM for executing string I/O IN (read) instructions.
15440 *
15441 * This API ASSUMES that the caller has already verified that the guest code is
15442 * allowed to access the I/O port. (The I/O port is in the DX register in the
15443 * guest state.)
15444 *
15445 * @returns Strict VBox status code.
15446 * @param pVCpu The cross context virtual CPU structure.
15447 * @param cbValue The size of the I/O port access (1, 2, or 4).
15448 * @param enmAddrMode The addressing mode.
15449 * @param fRepPrefix Indicates whether a repeat prefix is used
15450 * (doesn't matter which for this instruction).
15451 * @param cbInstr The instruction length in bytes.
15452 * @param fIoChecked Whether the access to the I/O port has been
15453 * checked or not. It's typically checked in the
15454 * HM scenario.
15455 */
15456VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15457 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15458{
15459 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15460
15461 /*
15462 * State init.
15463 */
15464 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15465
15466 /*
15467 * Switch orgy for getting to the right handler.
15468 */
15469 VBOXSTRICTRC rcStrict;
15470 if (fRepPrefix)
15471 {
15472 switch (enmAddrMode)
15473 {
15474 case IEMMODE_16BIT:
15475 switch (cbValue)
15476 {
15477 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15478 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15479 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15480 default:
15481 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15482 }
15483 break;
15484
15485 case IEMMODE_32BIT:
15486 switch (cbValue)
15487 {
15488 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15489 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15490 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15491 default:
15492 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15493 }
15494 break;
15495
15496 case IEMMODE_64BIT:
15497 switch (cbValue)
15498 {
15499 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15500 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15501 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15502 default:
15503 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15504 }
15505 break;
15506
15507 default:
15508 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15509 }
15510 }
15511 else
15512 {
15513 switch (enmAddrMode)
15514 {
15515 case IEMMODE_16BIT:
15516 switch (cbValue)
15517 {
15518 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15519 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15520 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15521 default:
15522 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15523 }
15524 break;
15525
15526 case IEMMODE_32BIT:
15527 switch (cbValue)
15528 {
15529 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15530 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15531 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15532 default:
15533 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15534 }
15535 break;
15536
15537 case IEMMODE_64BIT:
15538 switch (cbValue)
15539 {
15540 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15541 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15542 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15543 default:
15544 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15545 }
15546 break;
15547
15548 default:
15549 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15550 }
15551 }
15552
15553 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15554}
15555
15556
15557/**
15558 * Interface for rawmode to execute an OUT (write) instruction.
15559 *
15560 * @returns Strict VBox status code.
15561 * @param pVCpu The cross context virtual CPU structure.
15562 * @param cbInstr The instruction length in bytes.
15563 * @param u16Port The port to write to.
15564 * @param cbReg The register size.
15565 *
15566 * @remarks In ring-0 not all of the state needs to be synced in.
15567 */
15568VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15569{
15570 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15571 Assert(cbReg <= 4 && cbReg != 3);
15572
15573 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15574 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15575 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15576}
15577
15578
15579/**
15580 * Interface for rawmode to execute an IN (read) instruction.
15581 *
15582 * @returns Strict VBox status code.
15583 * @param pVCpu The cross context virtual CPU structure.
15584 * @param cbInstr The instruction length in bytes.
15585 * @param u16Port The port to read.
15586 * @param cbReg The register size.
15587 */
15588VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15589{
15590 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15591 Assert(cbReg <= 4 && cbReg != 3);
15592
15593 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15594 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15595 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15596}
15597
15598
15599/**
15600 * Interface for HM and EM to write to a CRx register.
15601 *
15602 * @returns Strict VBox status code.
15603 * @param pVCpu The cross context virtual CPU structure.
15604 * @param cbInstr The instruction length in bytes.
15605 * @param iCrReg The control register number (destination).
15606 * @param iGReg The general purpose register number (source).
15607 *
15608 * @remarks In ring-0 not all of the state needs to be synced in.
15609 */
15610VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15611{
15612 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15613 Assert(iCrReg < 16);
15614 Assert(iGReg < 16);
15615
15616 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15617 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15618 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15619}
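
#if 0 /* Illustrative sketch only, not part of the build. */
/*
 * Hypothetical caller sketch for IEMExecDecodedMovCRxWrite above: emulate a
 * "mov cr3, rax" (opcode bytes 0F 22 D8, i.e. 3 bytes).  The helper name and the
 * concrete register choices are illustrative assumptions.
 */
static VBOXSTRICTRC myExampleMovToCr3(PVMCPU pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg: CR3*/, X86_GREG_xAX);
}
#endif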
15620
15621
15622/**
15623 * Interface for HM and EM to read from a CRx register.
15624 *
15625 * @returns Strict VBox status code.
15626 * @param pVCpu The cross context virtual CPU structure.
15627 * @param cbInstr The instruction length in bytes.
15628 * @param iGReg The general purpose register number (destination).
15629 * @param iCrReg The control register number (source).
15630 *
15631 * @remarks In ring-0 not all of the state needs to be synced in.
15632 */
15633VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15634{
15635 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15636 Assert(iCrReg < 16);
15637 Assert(iGReg < 16);
15638
15639 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15640 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15641 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15642}
15643
15644
15645/**
15646 * Interface for HM and EM to clear the CR0[TS] bit.
15647 *
15648 * @returns Strict VBox status code.
15649 * @param pVCpu The cross context virtual CPU structure.
15650 * @param cbInstr The instruction length in bytes.
15651 *
15652 * @remarks In ring-0 not all of the state needs to be synced in.
15653 */
15654VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15655{
15656 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15657
15658 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15659 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15660 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15661}
15662
15663
15664/**
15665 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15666 *
15667 * @returns Strict VBox status code.
15668 * @param pVCpu The cross context virtual CPU structure.
15669 * @param cbInstr The instruction length in bytes.
15670 * @param uValue The 16-bit machine status word value to load into CR0.
15671 *
15672 * @remarks In ring-0 not all of the state needs to be synced in.
15673 */
15674VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
15675{
15676 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15677
15678 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15679 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
15680 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15681}
15682
15683
15684/**
15685 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15686 *
15687 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15688 *
15689 * @returns Strict VBox status code.
15690 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15691 * @param cbInstr The instruction length in bytes.
15692 * @remarks In ring-0 not all of the state needs to be synced in.
15693 * @thread EMT(pVCpu)
15694 */
15695VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15696{
15697 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15698
15699 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15700 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15701 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15702}
15703
15704
15705/**
15706 * Checks if IEM is in the process of delivering an event (interrupt or
15707 * exception).
15708 *
15709 * @returns true if we're in the process of raising an interrupt or exception,
15710 * false otherwise.
15711 * @param pVCpu The cross context virtual CPU structure.
15712 * @param puVector Where to store the vector associated with the
15713 * currently delivered event, optional.
15714 * @param pfFlags Where to store the event delivery flags (see
15715 * IEM_XCPT_FLAGS_XXX), optional.
15716 * @param puErr Where to store the error code associated with the
15717 * event, optional.
15718 * @param puCr2 Where to store the CR2 associated with the event,
15719 * optional.
15720 * @remarks The caller should check the flags to determine if the error code and
15721 * CR2 are valid for the event.
15722 */
15723VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15724{
15725 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15726 if (fRaisingXcpt)
15727 {
15728 if (puVector)
15729 *puVector = pVCpu->iem.s.uCurXcpt;
15730 if (pfFlags)
15731 *pfFlags = pVCpu->iem.s.fCurXcpt;
15732 if (puErr)
15733 *puErr = pVCpu->iem.s.uCurXcptErr;
15734 if (puCr2)
15735 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15736 }
15737 return fRaisingXcpt;
15738}
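
#if 0 /* Illustrative sketch only, not part of the build. */
/*
 * Hypothetical caller sketch for IEMGetCurrentXcpt above: query whether IEM is in
 * the middle of delivering an event and log the vector if so.  The helper name and
 * the log message are illustrative assumptions.
 */
static void myExampleLogPendingXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector    = 0;
    uint32_t fXcptFlags = 0;
    uint32_t uXcptErr   = 0;
    uint64_t uXcptCr2   = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fXcptFlags, &uXcptErr, &uXcptCr2))
        Log(("Currently delivering vector %#x (flags=%#x err=%#x cr2=%#RX64)\n",
             uVector, fXcptFlags, uXcptErr, uXcptCr2));
}
#endif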
15739
15740
15741#ifdef VBOX_WITH_NESTED_HWVIRT
15742/**
15743 * Interface for HM and EM to emulate the CLGI instruction.
15744 *
15745 * @returns Strict VBox status code.
15746 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15747 * @param cbInstr The instruction length in bytes.
15748 * @thread EMT(pVCpu)
15749 */
15750VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15751{
15752 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15753
15754 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15755 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15756 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15757}
15758
15759
15760/**
15761 * Interface for HM and EM to emulate the STGI instruction.
15762 *
15763 * @returns Strict VBox status code.
15764 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15765 * @param cbInstr The instruction length in bytes.
15766 * @thread EMT(pVCpu)
15767 */
15768VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15769{
15770 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15771
15772 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15773 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15774 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15775}
15776
15777
15778/**
15779 * Interface for HM and EM to emulate the VMLOAD instruction.
15780 *
15781 * @returns Strict VBox status code.
15782 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15783 * @param cbInstr The instruction length in bytes.
15784 * @thread EMT(pVCpu)
15785 */
15786VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15787{
15788 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15789
15790 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15791 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15792 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15793}
15794
15795
15796/**
15797 * Interface for HM and EM to emulate the VMSAVE instruction.
15798 *
15799 * @returns Strict VBox status code.
15800 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15801 * @param cbInstr The instruction length in bytes.
15802 * @thread EMT(pVCpu)
15803 */
15804VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15805{
15806 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15807
15808 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15809 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15810 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15811}
15812
15813
15814/**
15815 * Interface for HM and EM to emulate the INVLPGA instruction.
15816 *
15817 * @returns Strict VBox status code.
15818 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15819 * @param cbInstr The instruction length in bytes.
15820 * @thread EMT(pVCpu)
15821 */
15822VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15823{
15824 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15825
15826 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15827 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15828 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15829}
15830#endif /* VBOX_WITH_NESTED_HWVIRT */
15831
15832#ifdef IN_RING3
15833
15834/**
15835 * Handles the unlikely and probably fatal merge cases.
15836 *
15837 * @returns Merged status code.
15838 * @param rcStrict Current EM status code.
15839 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15840 * with @a rcStrict.
15841 * @param iMemMap The memory mapping index. For error reporting only.
15842 * @param pVCpu The cross context virtual CPU structure of the calling
15843 * thread, for error reporting only.
15844 */
15845DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15846 unsigned iMemMap, PVMCPU pVCpu)
15847{
15848 if (RT_FAILURE_NP(rcStrict))
15849 return rcStrict;
15850
15851 if (RT_FAILURE_NP(rcStrictCommit))
15852 return rcStrictCommit;
15853
15854 if (rcStrict == rcStrictCommit)
15855 return rcStrictCommit;
15856
15857 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15858 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15859 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15860 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15861 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15862 return VERR_IOM_FF_STATUS_IPE;
15863}
15864
15865
15866/**
15867 * Helper for IOMR3ProcessForceFlag.
15868 *
15869 * @returns Merged status code.
15870 * @param rcStrict Current EM status code.
15871 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15872 * with @a rcStrict.
15873 * @param iMemMap The memory mapping index. For error reporting only.
15874 * @param pVCpu The cross context virtual CPU structure of the calling
15875 * thread, for error reporting only.
15876 */
15877DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15878{
15879 /* Simple. */
15880 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15881 return rcStrictCommit;
15882
15883 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15884 return rcStrict;
15885
15886 /* EM scheduling status codes. */
15887 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15888 && rcStrict <= VINF_EM_LAST))
15889 {
15890 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15891 && rcStrictCommit <= VINF_EM_LAST))
15892 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15893 }
15894
15895 /* Unlikely */
15896 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15897}
15898
15899
15900/**
15901 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15902 *
15903 * @returns Merge between @a rcStrict and what the commit operation returned.
15904 * @param pVM The cross context VM structure.
15905 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15906 * @param rcStrict The status code returned by ring-0 or raw-mode.
15907 */
15908VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15909{
15910 /*
15911 * Reset the pending commit.
15912 */
15913 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15914 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15915 ("%#x %#x %#x\n",
15916 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15917 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15918
15919 /*
15920 * Commit the pending bounce buffers (usually just one).
15921 */
15922 unsigned cBufs = 0;
15923 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15924 while (iMemMap-- > 0)
15925 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15926 {
15927 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15928 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15929 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15930
15931 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15932 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15933 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15934
15935 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15936 {
15937 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15938 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15939 pbBuf,
15940 cbFirst,
15941 PGMACCESSORIGIN_IEM);
15942 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
15943 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
15944 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
15945 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
15946 }
15947
15948 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
15949 {
15950 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
15951 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
15952 pbBuf + cbFirst,
15953 cbSecond,
15954 PGMACCESSORIGIN_IEM);
15955 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
15956 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
15957 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
15958 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
15959 }
15960 cBufs++;
15961 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
15962 }
15963
15964 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
15965 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
15966 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15967 pVCpu->iem.s.cActiveMappings = 0;
15968 return rcStrict;
15969}
15970
15971#endif /* IN_RING3 */
15972