VirtualBox source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 66810

Last change: r66810, checked in by vboxsync:

IEM: Implemented movq2dq Vdq,Nq (f3 0f d6)

1/* $Id: IEMAll.cpp 66810 2017-05-05 14:36:10Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/hm_svm.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#ifdef IEM_VERIFICATION_MODE_FULL
118# include <VBox/vmm/rem.h>
119# include <VBox/vmm/mm.h>
120#endif
121#include <VBox/vmm/vm.h>
122#include <VBox/log.h>
123#include <VBox/err.h>
124#include <VBox/param.h>
125#include <VBox/dis.h>
126#include <VBox/disopcode.h>
127#include <iprt/assert.h>
128#include <iprt/string.h>
129#include <iprt/x86.h>
130
131
132/*********************************************************************************************************************************
133* Structures and Typedefs *
134*********************************************************************************************************************************/
135/** @typedef PFNIEMOP
136 * Pointer to an opcode decoder function.
137 */
138
139/** @def FNIEMOP_DEF
140 * Define an opcode decoder function.
141 *
142 * We're using macros for this so that adding and removing parameters as well as
143 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
144 *
145 * @param a_Name The function name.
146 */
147
148/** @typedef PFNIEMOPRM
149 * Pointer to an opcode decoder function with RM byte.
150 */
151
152/** @def FNIEMOPRM_DEF
153 * Define an opcode decoder function with RM byte.
154 *
155 * We're using macros for this so that adding and removing parameters as well as
156 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
157 *
158 * @param a_Name The function name.
159 */
160
161#if defined(__GNUC__) && defined(RT_ARCH_X86)
162typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
164# define FNIEMOP_DEF(a_Name) \
165 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
170
171#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
172typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
174# define FNIEMOP_DEF(a_Name) \
175 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
176# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
177 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
178# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
179 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
180
181#elif defined(__GNUC__)
182typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
183typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
184# define FNIEMOP_DEF(a_Name) \
185 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
186# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
187 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
188# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
189 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
190
191#else
192typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
193typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
194# define FNIEMOP_DEF(a_Name) \
195 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
196# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
197 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
198# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
199 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
200
201#endif
202#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
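/*
 * Illustration only (not part of the build): a hypothetical decoder stub
 * showing how the FNIEMOP_DEF / FNIEMOPRM_DEF wrappers above are meant to be
 * used.  The iemOp_IllustrationOnly_* names do not exist in IEM; the real
 * decoder functions live in the IEMAllInstructions*.cpp.h includes.
 *
 *      FNIEMOP_DEF(iemOp_IllustrationOnly_nop)
 *      {
 *          NOREF(pVCpu);           // pVCpu is the implicit parameter declared by FNIEMOP_DEF
 *          return VINF_SUCCESS;    // strict status code (VBOXSTRICTRC)
 *      }
 *
 *      FNIEMOPRM_DEF(iemOp_IllustrationOnly_grp)
 *      {
 *          NOREF(pVCpu); NOREF(bRm);   // bRm is the already fetched ModR/M byte
 *          return VINF_SUCCESS;
 *      }
 */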
203
204
205/**
206 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
207 */
208typedef union IEMSELDESC
209{
210 /** The legacy view. */
211 X86DESC Legacy;
212 /** The long mode view. */
213 X86DESC64 Long;
214} IEMSELDESC;
215/** Pointer to a selector descriptor table entry. */
216typedef IEMSELDESC *PIEMSELDESC;
217
218/**
219 * CPU exception classes.
220 */
221typedef enum IEMXCPTCLASS
222{
223 IEMXCPTCLASS_BENIGN,
224 IEMXCPTCLASS_CONTRIBUTORY,
225 IEMXCPTCLASS_PAGE_FAULT
226} IEMXCPTCLASS;
227
228
229/*********************************************************************************************************************************
230* Defined Constants And Macros *
231*********************************************************************************************************************************/
232/** @def IEM_WITH_SETJMP
233 * Enables alternative status code handling using setjmps.
234 *
235 * This adds a bit of expense via the setjmp() call since it saves all the
236 * non-volatile registers. However, it eliminates return code checks and allows
237 * for more optimal return value passing (return regs instead of stack buffer).
238 */
239#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
240# define IEM_WITH_SETJMP
241#endif
242
243/** Temporary hack to disable the double execution. Will be removed in favor
244 * of a dedicated execution mode in EM. */
245//#define IEM_VERIFICATION_MODE_NO_REM
246
247/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
248 * due to GCC lacking knowledge about the value range of a switch. */
249#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
250
251/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
252#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
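/*
 * Usage sketch (illustration only): the two default-case helpers above are
 * meant for exhaustive switches over enums, supplying both the 'default:'
 * label and the failure return in one go.  enmEffOpSize and cbValue below are
 * made up for the example:
 *
 *      switch (enmEffOpSize)
 *      {
 *          case IEMMODE_16BIT: cbValue = 2; break;
 *          case IEMMODE_32BIT: cbValue = 4; break;
 *          case IEMMODE_64BIT: cbValue = 8; break;
 *          IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *      }
 */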
253
254/**
255 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
256 * occasion.
257 */
258#ifdef LOG_ENABLED
259# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
260 do { \
261 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
262 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
263 } while (0)
264#else
265# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
266 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
267#endif
268
269/**
270 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
271 * occasion using the supplied logger statement.
272 *
273 * @param a_LoggerArgs What to log on failure.
274 */
275#ifdef LOG_ENABLED
276# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
277 do { \
278 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
279 /*LogFunc(a_LoggerArgs);*/ \
280 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
281 } while (0)
282#else
283# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
284 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
285#endif
286
287/**
288 * Call an opcode decoder function.
289 *
290 * We're using macros for this so that adding and removing parameters can be
291 * done as we please. See FNIEMOP_DEF.
292 */
293#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
294
295/**
296 * Call a common opcode decoder function taking one extra argument.
297 *
298 * We're using macros for this so that adding and removing parameters can be
299 * done as we please. See FNIEMOP_DEF_1.
300 */
301#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
302
303/**
304 * Call a common opcode decoder function taking two extra arguments.
305 *
306 * We're using macros for this so that adding and removing parameters can be
307 * done as we please. See FNIEMOP_DEF_2.
308 */
309#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
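/*
 * Dispatch sketch (illustration only): the decoder loop fetches an opcode
 * byte and jumps through the one-byte map declared further down, roughly:
 *
 *      uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
 *      return FNIEMOP_CALL(g_apfnOneByteMap[b]);
 *
 * IEM_OPCODE_GET_NEXT_U8 is the opcode fetch macro defined later in this file.
 */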
310
311/**
312 * Check if we're currently executing in real or virtual 8086 mode.
313 *
314 * @returns @c true if it is, @c false if not.
315 * @param a_pVCpu The IEM state of the current CPU.
316 */
317#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
318
319/**
320 * Check if we're currently executing in virtual 8086 mode.
321 *
322 * @returns @c true if it is, @c false if not.
323 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
324 */
325#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
326
327/**
328 * Check if we're currently executing in long mode.
329 *
330 * @returns @c true if it is, @c false if not.
331 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
332 */
333#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
334
335/**
336 * Check if we're currently executing in real mode.
337 *
338 * @returns @c true if it is, @c false if not.
339 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
340 */
341#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
342
343/**
344 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
345 * @returns PCCPUMFEATURES
346 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
347 */
348#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
349
350/**
351 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
352 * @returns PCCPUMFEATURES
353 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
354 */
355#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
356
357/**
358 * Evaluates to true if we're presenting an Intel CPU to the guest.
359 */
360#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
361
362/**
363 * Evaluates to true if we're presenting an AMD CPU to the guest.
364 */
365#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
366
367/**
368 * Check if the address is canonical.
369 */
370#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
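/*
 * Combined usage sketch (illustration only) for the mode query macros above;
 * GCPtrMem stands in for whatever effective address the instruction computed:
 *
 *      if (IEM_IS_REAL_OR_V86_MODE(pVCpu))         // instruction undefined outside protected mode
 *          return iemRaiseUndefinedOpcode(pVCpu);
 *      if (   IEM_IS_LONG_MODE(pVCpu)
 *          && !IEM_IS_CANONICAL(GCPtrMem))         // 64-bit addresses must be canonical
 *          return iemRaiseGeneralProtectionFault0(pVCpu);
 */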
371
372/** @def IEM_USE_UNALIGNED_DATA_ACCESS
373 * Use unaligned accesses instead of elaborate byte assembly. */
374#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
375# define IEM_USE_UNALIGNED_DATA_ACCESS
376#endif
377
378#ifdef VBOX_WITH_NESTED_HWVIRT
379/**
380 * Check the common SVM instruction preconditions.
381 */
382# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
383 do { \
384 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
385 { \
386 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
387 return iemRaiseUndefinedOpcode(pVCpu); \
388 } \
389 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
390 { \
391 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
392 return iemRaiseUndefinedOpcode(pVCpu); \
393 } \
394 if (pVCpu->iem.s.uCpl != 0) \
395 { \
396 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
397 return iemRaiseGeneralProtectionFault0(pVCpu); \
398 } \
399 } while (0)
400
401/**
402 * Check if SVM is enabled.
403 */
404# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
405
406/**
407 * Check if an SVM control/instruction intercept is set.
408 */
409# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
410
411/**
412 * Check if an SVM read CRx intercept is set.
413 */
414# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
415
416/**
417 * Check if an SVM write CRx intercept is set.
418 */
419# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
420
421/**
422 * Check if an SVM read DRx intercept is set.
423 */
424# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
425
426/**
427 * Check if an SVM write DRx intercept is set.
428 */
429# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
430
431/**
432 * Check if an SVM exception intercept is set.
433 */
434# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uVector)))
435
436/**
437 * Invokes the SVM \#VMEXIT handler for the nested-guest.
438 */
439# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
440 do \
441 { \
442 VBOXSTRICTRC rcStrictVmExit = HMSvmNstGstVmExit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), \
443 (a_uExitInfo2)); \
444 return rcStrictVmExit == VINF_SVM_VMEXIT ? VINF_SUCCESS : rcStrictVmExit; \
445 } while (0)
446
447/**
448 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
449 * corresponding decode assist information.
450 */
451# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
452 do \
453 { \
454 uint64_t uExitInfo1; \
455 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssist \
456 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
457 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
458 else \
459 uExitInfo1 = 0; \
460 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
461 } while (0)
462
463/**
464 * Checks and handles an SVM MSR intercept.
465 */
466# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) \
467 HMSvmNstGstHandleMsrIntercept((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_idMsr), (a_fWrite))
468
469#else
470# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
471# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
472# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
473# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
474# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
475# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
476# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
477# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
478# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
479# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
480# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) (VERR_SVM_IPE_1)
481
482#endif /* VBOX_WITH_NESTED_HWVIRT */
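/*
 * Usage sketch (illustration only): an SVM instruction implementation such as
 * VMLOAD would typically open with the common checks and the intercept test,
 * so the EFER.SVME / mode / CPL handling stays in one place.  The
 * SVM_CTRL_INTERCEPT_VMLOAD and SVM_EXIT_VMLOAD names are assumed to be the
 * hm_svm.h constants for the corresponding intercept bit and exit code:
 *
 *      IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
 *      if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
 *          IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMLOAD, 0, 0);
 */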
483
484
485/*********************************************************************************************************************************
486* Global Variables *
487*********************************************************************************************************************************/
488extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
489
490
491/** Function table for the ADD instruction. */
492IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
493{
494 iemAImpl_add_u8, iemAImpl_add_u8_locked,
495 iemAImpl_add_u16, iemAImpl_add_u16_locked,
496 iemAImpl_add_u32, iemAImpl_add_u32_locked,
497 iemAImpl_add_u64, iemAImpl_add_u64_locked
498};
499
500/** Function table for the ADC instruction. */
501IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
502{
503 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
504 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
505 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
506 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
507};
508
509/** Function table for the SUB instruction. */
510IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
511{
512 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
513 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
514 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
515 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
516};
517
518/** Function table for the SBB instruction. */
519IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
520{
521 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
522 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
523 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
524 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
525};
526
527/** Function table for the OR instruction. */
528IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
529{
530 iemAImpl_or_u8, iemAImpl_or_u8_locked,
531 iemAImpl_or_u16, iemAImpl_or_u16_locked,
532 iemAImpl_or_u32, iemAImpl_or_u32_locked,
533 iemAImpl_or_u64, iemAImpl_or_u64_locked
534};
535
536/** Function table for the XOR instruction. */
537IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
538{
539 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
540 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
541 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
542 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
543};
544
545/** Function table for the AND instruction. */
546IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
547{
548 iemAImpl_and_u8, iemAImpl_and_u8_locked,
549 iemAImpl_and_u16, iemAImpl_and_u16_locked,
550 iemAImpl_and_u32, iemAImpl_and_u32_locked,
551 iemAImpl_and_u64, iemAImpl_and_u64_locked
552};
553
554/** Function table for the CMP instruction.
555 * @remarks Making operand order ASSUMPTIONS.
556 */
557IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
558{
559 iemAImpl_cmp_u8, NULL,
560 iemAImpl_cmp_u16, NULL,
561 iemAImpl_cmp_u32, NULL,
562 iemAImpl_cmp_u64, NULL
563};
564
565/** Function table for the TEST instruction.
566 * @remarks Making operand order ASSUMPTIONS.
567 */
568IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
569{
570 iemAImpl_test_u8, NULL,
571 iemAImpl_test_u16, NULL,
572 iemAImpl_test_u32, NULL,
573 iemAImpl_test_u64, NULL
574};
575
576/** Function table for the BT instruction. */
577IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
578{
579 NULL, NULL,
580 iemAImpl_bt_u16, NULL,
581 iemAImpl_bt_u32, NULL,
582 iemAImpl_bt_u64, NULL
583};
584
585/** Function table for the BTC instruction. */
586IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
587{
588 NULL, NULL,
589 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
590 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
591 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
592};
593
594/** Function table for the BTR instruction. */
595IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
596{
597 NULL, NULL,
598 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
599 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
600 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
601};
602
603/** Function table for the BTS instruction. */
604IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
605{
606 NULL, NULL,
607 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
608 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
609 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
610};
611
612/** Function table for the BSF instruction. */
613IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
614{
615 NULL, NULL,
616 iemAImpl_bsf_u16, NULL,
617 iemAImpl_bsf_u32, NULL,
618 iemAImpl_bsf_u64, NULL
619};
620
621/** Function table for the BSR instruction. */
622IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
623{
624 NULL, NULL,
625 iemAImpl_bsr_u16, NULL,
626 iemAImpl_bsr_u32, NULL,
627 iemAImpl_bsr_u64, NULL
628};
629
630/** Function table for the IMUL instruction. */
631IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
632{
633 NULL, NULL,
634 iemAImpl_imul_two_u16, NULL,
635 iemAImpl_imul_two_u32, NULL,
636 iemAImpl_imul_two_u64, NULL
637};
638
639/** Group 1 /r lookup table. */
640IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
641{
642 &g_iemAImpl_add,
643 &g_iemAImpl_or,
644 &g_iemAImpl_adc,
645 &g_iemAImpl_sbb,
646 &g_iemAImpl_and,
647 &g_iemAImpl_sub,
648 &g_iemAImpl_xor,
649 &g_iemAImpl_cmp
650};
651
652/** Function table for the INC instruction. */
653IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
654{
655 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
656 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
657 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
658 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
659};
660
661/** Function table for the DEC instruction. */
662IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
663{
664 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
665 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
666 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
667 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
668};
669
670/** Function table for the NEG instruction. */
671IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
672{
673 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
674 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
675 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
676 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
677};
678
679/** Function table for the NOT instruction. */
680IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
681{
682 iemAImpl_not_u8, iemAImpl_not_u8_locked,
683 iemAImpl_not_u16, iemAImpl_not_u16_locked,
684 iemAImpl_not_u32, iemAImpl_not_u32_locked,
685 iemAImpl_not_u64, iemAImpl_not_u64_locked
686};
687
688
689/** Function table for the ROL instruction. */
690IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
691{
692 iemAImpl_rol_u8,
693 iemAImpl_rol_u16,
694 iemAImpl_rol_u32,
695 iemAImpl_rol_u64
696};
697
698/** Function table for the ROR instruction. */
699IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
700{
701 iemAImpl_ror_u8,
702 iemAImpl_ror_u16,
703 iemAImpl_ror_u32,
704 iemAImpl_ror_u64
705};
706
707/** Function table for the RCL instruction. */
708IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
709{
710 iemAImpl_rcl_u8,
711 iemAImpl_rcl_u16,
712 iemAImpl_rcl_u32,
713 iemAImpl_rcl_u64
714};
715
716/** Function table for the RCR instruction. */
717IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
718{
719 iemAImpl_rcr_u8,
720 iemAImpl_rcr_u16,
721 iemAImpl_rcr_u32,
722 iemAImpl_rcr_u64
723};
724
725/** Function table for the SHL instruction. */
726IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
727{
728 iemAImpl_shl_u8,
729 iemAImpl_shl_u16,
730 iemAImpl_shl_u32,
731 iemAImpl_shl_u64
732};
733
734/** Function table for the SHR instruction. */
735IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
736{
737 iemAImpl_shr_u8,
738 iemAImpl_shr_u16,
739 iemAImpl_shr_u32,
740 iemAImpl_shr_u64
741};
742
743/** Function table for the SAR instruction. */
744IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
745{
746 iemAImpl_sar_u8,
747 iemAImpl_sar_u16,
748 iemAImpl_sar_u32,
749 iemAImpl_sar_u64
750};
751
752
753/** Function table for the MUL instruction. */
754IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
755{
756 iemAImpl_mul_u8,
757 iemAImpl_mul_u16,
758 iemAImpl_mul_u32,
759 iemAImpl_mul_u64
760};
761
762/** Function table for the IMUL instruction working implicitly on rAX. */
763IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
764{
765 iemAImpl_imul_u8,
766 iemAImpl_imul_u16,
767 iemAImpl_imul_u32,
768 iemAImpl_imul_u64
769};
770
771/** Function table for the DIV instruction. */
772IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
773{
774 iemAImpl_div_u8,
775 iemAImpl_div_u16,
776 iemAImpl_div_u32,
777 iemAImpl_div_u64
778};
779
780/** Function table for the IDIV instruction. */
781IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
782{
783 iemAImpl_idiv_u8,
784 iemAImpl_idiv_u16,
785 iemAImpl_idiv_u32,
786 iemAImpl_idiv_u64
787};
788
789/** Function table for the SHLD instruction */
790IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
791{
792 iemAImpl_shld_u16,
793 iemAImpl_shld_u32,
794 iemAImpl_shld_u64,
795};
796
797/** Function table for the SHRD instruction */
798IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
799{
800 iemAImpl_shrd_u16,
801 iemAImpl_shrd_u32,
802 iemAImpl_shrd_u64,
803};
804
805
806/** Function table for the PUNPCKLBW instruction */
807IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
809/** Function table for the PUNPCKLWD instruction */
809IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
810/** Function table for the PUNPCKLDQ instruction */
811IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
812/** Function table for the PUNPCKLQDQ instruction */
813IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
814
815/** Function table for the PUNPCKHBW instruction */
816IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
818/** Function table for the PUNPCKHWD instruction */
818IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
819/** Function table for the PUNPCKHDQ instruction */
820IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
821/** Function table for the PUNPCKHQDQ instruction */
822IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
823
824/** Function table for the PXOR instruction */
825IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
826/** Function table for the PCMPEQB instruction */
827IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
828/** Function table for the PCMPEQW instruction */
829IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
830/** Function table for the PCMPEQD instruction */
831IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
832
833
834#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
835/** What IEM just wrote. */
836uint8_t g_abIemWrote[256];
837/** How much IEM just wrote. */
838size_t g_cbIemWrote;
839#endif
840
841
842/*********************************************************************************************************************************
843* Internal Functions *
844*********************************************************************************************************************************/
845IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
846IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
847IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
848IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
849/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
850IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
851IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
852IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
853IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
854IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
855IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
856IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
857IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
858IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
859IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
860IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
861IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
862#ifdef IEM_WITH_SETJMP
863DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
864DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
865DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
866DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
867DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
868#endif
869
870IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
871IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
872IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
873IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
874IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
875IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
876IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
877IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
878IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
879IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
880IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
881IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
882IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
883IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
884IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
885IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
886
887#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
888IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
889#endif
890IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
891IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
892
893#ifdef VBOX_WITH_NESTED_HWVIRT
894/**
895 * Checks if the intercepted IO instruction causes a \#VMEXIT and handles it
896 * accordingly.
897 *
898 * @returns VBox strict status code.
899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
900 * @param u16Port The IO port being accessed.
901 * @param enmIoType The type of IO access.
902 * @param cbReg The IO operand size in bytes.
903 * @param cAddrSizeBits The address size in bits (16, 32 or 64).
904 * @param iEffSeg The effective segment number.
905 * @param fRep Whether this is a repeating IO instruction (REP prefix).
906 * @param fStrIo Whether this is a string IO instruction.
907 * @param cbInstr The length of the IO instruction in bytes.
908 *
909 * @remarks This must be called only when IO instructions are intercepted by the
910 * nested-guest hypervisor.
911 */
912IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
913 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
914{
915 Assert(IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT));
916 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
917 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
918
919 static const uint32_t s_auIoOpSize[] = { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
920 static const uint32_t s_auIoAddrSize[] = { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
921
922 SVMIOIOEXITINFO IoExitInfo;
923 IoExitInfo.u = s_auIoOpSize[cbReg & 7];
924 IoExitInfo.u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
925 IoExitInfo.n.u1STR = fStrIo;
926 IoExitInfo.n.u1REP = fRep;
927 IoExitInfo.n.u3SEG = iEffSeg & 0x7;
928 IoExitInfo.n.u1Type = enmIoType;
929 IoExitInfo.n.u16Port = u16Port;
930
931 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
932 return HMSvmNstGstHandleIOIntercept(pVCpu, pCtx, &IoExitInfo, pCtx->rip + cbInstr);
933}
934
935#else
936IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
937 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
938{
939 RT_NOREF9(pVCpu, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo, cbInstr);
940 return VERR_IEM_IPE_9;
941}
942#endif /* VBOX_WITH_NESTED_HWVIRT */
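/*
 * Caller sketch (illustration only): an OUT implementation that finds the
 * IOIO intercept active would hand over to the helper above along these
 * lines; SVMIOIOTYPE_OUT is assumed to be the hm_svm.h enum value for output
 * accesses and the operand values are placeholders:
 *
 *      if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT))
 *          return iemSvmHandleIOIntercept(pVCpu, u16Port, SVMIOIOTYPE_OUT, cbReg,
 *                                         cAddrSizeBits, iEffSeg, false, false, cbInstr);
 */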
943
944
945/**
946 * Sets the pass up status.
947 *
948 * @returns VINF_SUCCESS.
949 * @param pVCpu The cross context virtual CPU structure of the
950 * calling thread.
951 * @param rcPassUp The pass up status. Must be informational.
952 * VINF_SUCCESS is not allowed.
953 */
954IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
955{
956 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
957
958 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
959 if (rcOldPassUp == VINF_SUCCESS)
960 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
961 /* If both are EM scheduling codes, use EM priority rules. */
962 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
963 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
964 {
965 if (rcPassUp < rcOldPassUp)
966 {
967 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
968 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
969 }
970 else
971 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
972 }
973 /* Override EM scheduling with specific status code. */
974 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
975 {
976 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
977 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
978 }
979 /* Don't override specific status code, first come first served. */
980 else
981 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
982 return VINF_SUCCESS;
983}
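/*
 * Usage sketch (illustration only), mirroring how the opcode prefetch code
 * further down treats informational PGM statuses: anything that is a success
 * but not VINF_SUCCESS gets remembered here and execution carries on.
 *
 *      VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbDst, PGMACCESSORIGIN_IEM);
 *      if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *          rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
 *
 * pvDst and cbDst are placeholders for the example.
 */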
984
985
986/**
987 * Calculates the CPU mode.
988 *
989 * This is mainly for updating IEMCPU::enmCpuMode.
990 *
991 * @returns CPU mode.
992 * @param pCtx The register context for the CPU.
993 */
994DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
995{
996 if (CPUMIsGuestIn64BitCodeEx(pCtx))
997 return IEMMODE_64BIT;
998 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
999 return IEMMODE_32BIT;
1000 return IEMMODE_16BIT;
1001}
1002
1003
1004/**
1005 * Initializes the execution state.
1006 *
1007 * @param pVCpu The cross context virtual CPU structure of the
1008 * calling thread.
1009 * @param fBypassHandlers Whether to bypass access handlers.
1010 *
1011 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1012 * side-effects in strict builds.
1013 */
1014DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1015{
1016 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1017
1018 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1019
1020#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1021 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1022 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1023 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1024 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1025 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1026 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1027 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1028 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1029#endif
1030
1031#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1032 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1033#endif
1034 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1035 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1036#ifdef VBOX_STRICT
1037 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1038 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1039 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1040 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1041 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1042 pVCpu->iem.s.uRexReg = 127;
1043 pVCpu->iem.s.uRexB = 127;
1044 pVCpu->iem.s.uRexIndex = 127;
1045 pVCpu->iem.s.iEffSeg = 127;
1046 pVCpu->iem.s.idxPrefix = 127;
1047 pVCpu->iem.s.uVex3rdReg = 127;
1048 pVCpu->iem.s.uVexLength = 127;
1049 pVCpu->iem.s.fEvexStuff = 127;
1050 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1051# ifdef IEM_WITH_CODE_TLB
1052 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1053 pVCpu->iem.s.pbInstrBuf = NULL;
1054 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1055 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1056 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1057 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1058# else
1059 pVCpu->iem.s.offOpcode = 127;
1060 pVCpu->iem.s.cbOpcode = 127;
1061# endif
1062#endif
1063
1064 pVCpu->iem.s.cActiveMappings = 0;
1065 pVCpu->iem.s.iNextMapping = 0;
1066 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1067 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1068#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1069 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1070 && pCtx->cs.u64Base == 0
1071 && pCtx->cs.u32Limit == UINT32_MAX
1072 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1073 if (!pVCpu->iem.s.fInPatchCode)
1074 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1075#endif
1076
1077#ifdef IEM_VERIFICATION_MODE_FULL
1078 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1079 pVCpu->iem.s.fNoRem = true;
1080#endif
1081}
1082
1083
1084/**
1085 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1086 *
1087 * @param pVCpu The cross context virtual CPU structure of the
1088 * calling thread.
1089 */
1090DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1091{
1092 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1093#ifdef IEM_VERIFICATION_MODE_FULL
1094 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1095#endif
1096#ifdef VBOX_STRICT
1097# ifdef IEM_WITH_CODE_TLB
1098 NOREF(pVCpu);
1099# else
1100 pVCpu->iem.s.cbOpcode = 0;
1101# endif
1102#else
1103 NOREF(pVCpu);
1104#endif
1105}
1106
1107
1108/**
1109 * Initializes the decoder state.
1110 *
1111 * iemReInitDecoder is mostly a copy of this function.
1112 *
1113 * @param pVCpu The cross context virtual CPU structure of the
1114 * calling thread.
1115 * @param fBypassHandlers Whether to bypass access handlers.
1116 */
1117DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1118{
1119 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1120
1121 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1122
1123#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1124 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1125 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1126 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1127 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1128 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1129 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1130 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1131 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1132#endif
1133
1134#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1135 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1136#endif
1137 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1138#ifdef IEM_VERIFICATION_MODE_FULL
1139 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1140 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1141#endif
1142 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1143 pVCpu->iem.s.enmCpuMode = enmMode;
1144 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1145 pVCpu->iem.s.enmEffAddrMode = enmMode;
1146 if (enmMode != IEMMODE_64BIT)
1147 {
1148 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1149 pVCpu->iem.s.enmEffOpSize = enmMode;
1150 }
1151 else
1152 {
1153 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1154 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1155 }
1156 pVCpu->iem.s.fPrefixes = 0;
1157 pVCpu->iem.s.uRexReg = 0;
1158 pVCpu->iem.s.uRexB = 0;
1159 pVCpu->iem.s.uRexIndex = 0;
1160 pVCpu->iem.s.idxPrefix = 0;
1161 pVCpu->iem.s.uVex3rdReg = 0;
1162 pVCpu->iem.s.uVexLength = 0;
1163 pVCpu->iem.s.fEvexStuff = 0;
1164 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1165#ifdef IEM_WITH_CODE_TLB
1166 pVCpu->iem.s.pbInstrBuf = NULL;
1167 pVCpu->iem.s.offInstrNextByte = 0;
1168 pVCpu->iem.s.offCurInstrStart = 0;
1169# ifdef VBOX_STRICT
1170 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1171 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1172 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1173# endif
1174#else
1175 pVCpu->iem.s.offOpcode = 0;
1176 pVCpu->iem.s.cbOpcode = 0;
1177#endif
1178 pVCpu->iem.s.cActiveMappings = 0;
1179 pVCpu->iem.s.iNextMapping = 0;
1180 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1181 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1182#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1183 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1184 && pCtx->cs.u64Base == 0
1185 && pCtx->cs.u32Limit == UINT32_MAX
1186 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1187 if (!pVCpu->iem.s.fInPatchCode)
1188 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1189#endif
1190
1191#ifdef DBGFTRACE_ENABLED
1192 switch (enmMode)
1193 {
1194 case IEMMODE_64BIT:
1195 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1196 break;
1197 case IEMMODE_32BIT:
1198 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1199 break;
1200 case IEMMODE_16BIT:
1201 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1202 break;
1203 }
1204#endif
1205}
1206
1207
1208/**
1209 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1210 *
1211 * This is mostly a copy of iemInitDecoder.
1212 *
1213 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1214 */
1215DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1216{
1217 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1218
1219 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1220
1221#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1222 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1223 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1224 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1225 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1226 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1227 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1228 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1229 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1230#endif
1231
1232 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1233#ifdef IEM_VERIFICATION_MODE_FULL
1234 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1235 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1236#endif
1237 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1238 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1239 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1240 pVCpu->iem.s.enmEffAddrMode = enmMode;
1241 if (enmMode != IEMMODE_64BIT)
1242 {
1243 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1244 pVCpu->iem.s.enmEffOpSize = enmMode;
1245 }
1246 else
1247 {
1248 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1249 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1250 }
1251 pVCpu->iem.s.fPrefixes = 0;
1252 pVCpu->iem.s.uRexReg = 0;
1253 pVCpu->iem.s.uRexB = 0;
1254 pVCpu->iem.s.uRexIndex = 0;
1255 pVCpu->iem.s.idxPrefix = 0;
1256 pVCpu->iem.s.uVex3rdReg = 0;
1257 pVCpu->iem.s.uVexLength = 0;
1258 pVCpu->iem.s.fEvexStuff = 0;
1259 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1260#ifdef IEM_WITH_CODE_TLB
1261 if (pVCpu->iem.s.pbInstrBuf)
1262 {
1263 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1264 - pVCpu->iem.s.uInstrBufPc;
1265 if (off < pVCpu->iem.s.cbInstrBufTotal)
1266 {
1267 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1268 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1269 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1270 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1271 else
1272 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1273 }
1274 else
1275 {
1276 pVCpu->iem.s.pbInstrBuf = NULL;
1277 pVCpu->iem.s.offInstrNextByte = 0;
1278 pVCpu->iem.s.offCurInstrStart = 0;
1279 pVCpu->iem.s.cbInstrBuf = 0;
1280 pVCpu->iem.s.cbInstrBufTotal = 0;
1281 }
1282 }
1283 else
1284 {
1285 pVCpu->iem.s.offInstrNextByte = 0;
1286 pVCpu->iem.s.offCurInstrStart = 0;
1287 pVCpu->iem.s.cbInstrBuf = 0;
1288 pVCpu->iem.s.cbInstrBufTotal = 0;
1289 }
1290#else
1291 pVCpu->iem.s.cbOpcode = 0;
1292 pVCpu->iem.s.offOpcode = 0;
1293#endif
1294 Assert(pVCpu->iem.s.cActiveMappings == 0);
1295 pVCpu->iem.s.iNextMapping = 0;
1296 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1297 Assert(pVCpu->iem.s.fBypassHandlers == false);
1298#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1299 if (!pVCpu->iem.s.fInPatchCode)
1300 { /* likely */ }
1301 else
1302 {
1303 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1304 && pCtx->cs.u64Base == 0
1305 && pCtx->cs.u32Limit == UINT32_MAX
1306 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1307 if (!pVCpu->iem.s.fInPatchCode)
1308 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1309 }
1310#endif
1311
1312#ifdef DBGFTRACE_ENABLED
1313 switch (enmMode)
1314 {
1315 case IEMMODE_64BIT:
1316 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1317 break;
1318 case IEMMODE_32BIT:
1319 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1320 break;
1321 case IEMMODE_16BIT:
1322 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1323 break;
1324 }
1325#endif
1326}
1327
1328
1329
1330/**
1331 * Prefetches opcodes the first time, when starting execution.
1332 *
1333 * @returns Strict VBox status code.
1334 * @param pVCpu The cross context virtual CPU structure of the
1335 * calling thread.
1336 * @param fBypassHandlers Whether to bypass access handlers.
1337 */
1338IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1339{
1340#ifdef IEM_VERIFICATION_MODE_FULL
1341 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1342#endif
1343 iemInitDecoder(pVCpu, fBypassHandlers);
1344
1345#ifdef IEM_WITH_CODE_TLB
1346 /** @todo Do ITLB lookup here. */
1347
1348#else /* !IEM_WITH_CODE_TLB */
1349
1350 /*
1351 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1352 *
1353 * First translate CS:rIP to a physical address.
1354 */
1355 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1356 uint32_t cbToTryRead;
1357 RTGCPTR GCPtrPC;
1358 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1359 {
1360 cbToTryRead = PAGE_SIZE;
1361 GCPtrPC = pCtx->rip;
1362 if (IEM_IS_CANONICAL(GCPtrPC))
1363 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1364 else
1365 return iemRaiseGeneralProtectionFault0(pVCpu);
1366 }
1367 else
1368 {
1369 uint32_t GCPtrPC32 = pCtx->eip;
1370 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1371 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1372 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1373 else
1374 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1375 if (cbToTryRead) { /* likely */ }
1376 else /* overflowed */
1377 {
1378 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1379 cbToTryRead = UINT32_MAX;
1380 }
1381 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1382 Assert(GCPtrPC <= UINT32_MAX);
1383 }
1384
1385# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1386 /* Allow interpretation of patch manager code blocks since they can for
1387 instance throw #PFs for perfectly good reasons. */
1388 if (pVCpu->iem.s.fInPatchCode)
1389 {
1390 size_t cbRead = 0;
1391 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1392 AssertRCReturn(rc, rc);
1393 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1394 return VINF_SUCCESS;
1395 }
1396# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1397
1398 RTGCPHYS GCPhys;
1399 uint64_t fFlags;
1400 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1401 if (RT_SUCCESS(rc)) { /* probable */ }
1402 else
1403 {
1404 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1405 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1406 }
1407 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1408 else
1409 {
1410 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1411 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1412 }
1413 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1414 else
1415 {
1416 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1417 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1418 }
1419 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1420 /** @todo Check reserved bits and such stuff. PGM is better at doing
1421 * that, so do it when implementing the guest virtual address
1422 * TLB... */
1423
1424# ifdef IEM_VERIFICATION_MODE_FULL
1425 /*
1426 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1427 * instruction.
1428 */
1429 /** @todo optimize this differently by not using PGMPhysRead. */
1430 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1431 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1432 if ( offPrevOpcodes < cbOldOpcodes
1433 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1434 {
1435 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1436 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1437 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1438 pVCpu->iem.s.cbOpcode = cbNew;
1439 return VINF_SUCCESS;
1440 }
1441# endif
1442
1443 /*
1444 * Read the bytes at this address.
1445 */
1446 PVM pVM = pVCpu->CTX_SUFF(pVM);
1447# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1448 size_t cbActual;
1449 if ( PATMIsEnabled(pVM)
1450 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1451 {
1452 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1453 Assert(cbActual > 0);
1454 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1455 }
1456 else
1457# endif
1458 {
1459 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1460 if (cbToTryRead > cbLeftOnPage)
1461 cbToTryRead = cbLeftOnPage;
1462 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1463 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1464
1465 if (!pVCpu->iem.s.fBypassHandlers)
1466 {
1467 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1468 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1469 { /* likely */ }
1470 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1471 {
1472 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1473 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1474 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1475 }
1476 else
1477 {
1478 Log((RT_SUCCESS(rcStrict)
1479 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1480 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1481 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1482 return rcStrict;
1483 }
1484 }
1485 else
1486 {
1487 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1488 if (RT_SUCCESS(rc))
1489 { /* likely */ }
1490 else
1491 {
1492 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1493 GCPtrPC, GCPhys, cbToTryRead, rc));
1494 return rc;
1495 }
1496 }
1497 pVCpu->iem.s.cbOpcode = cbToTryRead;
1498 }
1499#endif /* !IEM_WITH_CODE_TLB */
1500 return VINF_SUCCESS;
1501}
1502
1503
1504/**
1505 * Invalidates the IEM TLBs.
1506 *
1507 * This is called internally as well as by PGM when moving GC mappings.
1508 *
1510 * @param pVCpu The cross context virtual CPU structure of the calling
1511 * thread.
1512 * @param fVmm Set when PGM calls us with a remapping.
1513 */
1514VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1515{
1516#ifdef IEM_WITH_CODE_TLB
1517 pVCpu->iem.s.cbInstrBufTotal = 0;
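 /* Bumping the revision invalidates all current entries at once, as the entry
    tags include the revision; only on the rare wrap-around to zero do the tags
    have to be cleared by hand (same scheme for the data TLB below). */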
1518 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1519 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1520 { /* very likely */ }
1521 else
1522 {
1523 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1524 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1525 while (i-- > 0)
1526 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1527 }
1528#endif
1529
1530#ifdef IEM_WITH_DATA_TLB
1531 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1532 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1533 { /* very likely */ }
1534 else
1535 {
1536 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1537 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1538 while (i-- > 0)
1539 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1540 }
1541#endif
1542 NOREF(pVCpu); NOREF(fVmm);
1543}
1544
1545
1546/**
1547 * Invalidates a page in the TLBs.
1548 *
1549 * @param pVCpu The cross context virtual CPU structure of the calling
1550 * thread.
1551 * @param GCPtr The address of the page to invalidate
1552 */
1553VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1554{
1555#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
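 /* The TLBs are direct mapped: the low 8 bits of the virtual page number pick the
    entry, and the tag is that page number ORed with the current TLB revision. */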
1556 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1557 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1558 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1559 uintptr_t idx = (uint8_t)GCPtr;
1560
1561# ifdef IEM_WITH_CODE_TLB
1562 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1563 {
1564 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1565 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1566 pVCpu->iem.s.cbInstrBufTotal = 0;
1567 }
1568# endif
1569
1570# ifdef IEM_WITH_DATA_TLB
1571 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1572 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1573# endif
1574#else
1575 NOREF(pVCpu); NOREF(GCPtr);
1576#endif
1577}
1578
1579
1580/**
1581 * Invalidates the host physical aspects of the IEM TLBs.
1582 *
1583 * This is called internally as well as by PGM when moving GC mappings.
1584 *
1585 * @param pVCpu The cross context virtual CPU structure of the calling
1586 * thread.
1587 */
1588VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1589{
1590#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1591 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1592
1593# ifdef IEM_WITH_CODE_TLB
1594 pVCpu->iem.s.cbInstrBufTotal = 0;
1595# endif
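 /* Advancing the physical revision marks the cached physical info (ring-3 mapping,
    read/write flags) of every entry as stale; only on wrap-around to zero is each
    entry scrubbed explicitly. */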
1596 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1597 if (uTlbPhysRev != 0)
1598 {
1599 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1600 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1601 }
1602 else
1603 {
1604 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1605 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1606
1607 unsigned i;
1608# ifdef IEM_WITH_CODE_TLB
1609 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1610 while (i-- > 0)
1611 {
1612 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1613 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1614 }
1615# endif
1616# ifdef IEM_WITH_DATA_TLB
1617 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1618 while (i-- > 0)
1619 {
1620 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1621 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1622 }
1623# endif
1624 }
1625#else
1626 NOREF(pVCpu);
1627#endif
1628}
1629
1630
1631/**
1632 * Invalidates the host physical aspects of the IEM TLBs for all CPUs.
1633 *
1634 * This is called internally as well as by PGM when moving GC mappings.
1635 *
1636 * @param pVM The cross context VM structure.
1637 *
1638 * @remarks Caller holds the PGM lock.
1639 */
1640VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1641{
1642 RT_NOREF_PV(pVM);
1643}
1644
1645#ifdef IEM_WITH_CODE_TLB
1646
1647/**
1648 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception
1649 * and longjmping on failure.
1650 *
1651 * We end up here for a number of reasons:
1652 * - pbInstrBuf isn't yet initialized.
1653 * - Advancing beyond the buffer boundary (e.g. cross page).
1654 * - Advancing beyond the CS segment limit.
1655 * - Fetching from non-mappable page (e.g. MMIO).
1656 *
1657 * @param pVCpu The cross context virtual CPU structure of the
1658 * calling thread.
1659 * @param pvDst Where to return the bytes.
1660 * @param cbDst Number of bytes to read.
1661 *
1662 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1663 */
1664IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1665{
1666#ifdef IN_RING3
1667//__debugbreak();
1668 for (;;)
1669 {
1670 Assert(cbDst <= 8);
1671 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1672
1673 /*
1674 * We might have a partial buffer match, deal with that first to make the
1675 * rest simpler. This is the first part of the cross page/buffer case.
1676 */
1677 if (pVCpu->iem.s.pbInstrBuf != NULL)
1678 {
1679 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1680 {
1681 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1682 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1683 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1684
1685 cbDst -= cbCopy;
1686 pvDst = (uint8_t *)pvDst + cbCopy;
1687 offBuf += cbCopy;
1688 pVCpu->iem.s.offInstrNextByte = offBuf;
1689 }
1690 }
1691
1692 /*
1693 * Check segment limit, figuring how much we're allowed to access at this point.
1694 *
1695 * We will fault immediately if RIP is past the segment limit / in non-canonical
1696 * territory. If we do continue, there are one or more bytes to read before we
1697 * end up in trouble and we need to do that first before faulting.
1698 */
1699 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1700 RTGCPTR GCPtrFirst;
1701 uint32_t cbMaxRead;
1702 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1703 {
1704 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1705 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1706 { /* likely */ }
1707 else
1708 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1709 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1710 }
1711 else
1712 {
1713 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1714 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1715 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1716 { /* likely */ }
1717 else
1718 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1719 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1720 if (cbMaxRead != 0)
1721 { /* likely */ }
1722 else
1723 {
1724 /* Overflowed because address is 0 and limit is max. */
1725 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1726 cbMaxRead = X86_PAGE_SIZE;
1727 }
1728 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1729 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1730 if (cbMaxRead2 < cbMaxRead)
1731 cbMaxRead = cbMaxRead2;
1732 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1733 }
1734
1735 /*
1736 * Get the TLB entry for this piece of code.
1737 */
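 /* The tag is the virtual page number ORed with the current TLB revision, so
    entries created before the last IEMTlbInvalidateAll call can never match. */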
1738 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1739 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1740 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1741 if (pTlbe->uTag == uTag)
1742 {
1743 /* likely when executing lots of code, otherwise unlikely */
1744# ifdef VBOX_WITH_STATISTICS
1745 pVCpu->iem.s.CodeTlb.cTlbHits++;
1746# endif
1747 }
1748 else
1749 {
1750 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1751# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1752 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1753 {
1754 pTlbe->uTag = uTag;
1755 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1756 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1757 pTlbe->GCPhys = NIL_RTGCPHYS;
1758 pTlbe->pbMappingR3 = NULL;
1759 }
1760 else
1761# endif
1762 {
1763 RTGCPHYS GCPhys;
1764 uint64_t fFlags;
1765 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1766 if (RT_FAILURE(rc))
1767 {
1768 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1769 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1770 }
1771
1772 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1773 pTlbe->uTag = uTag;
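 /* Inverting the PTE bits turns absent U/S, R/W and dirty bits into the
    IEMTLBE_F_PT_NO_USER/NO_WRITE/NO_DIRTY flags (the flag values presumably
    mirror the PTE bit positions), while shifting the PAE NX bit down yields
    IEMTLBE_F_PT_NO_EXEC - see the AssertCompile above. */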
1774 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1775 pTlbe->GCPhys = GCPhys;
1776 pTlbe->pbMappingR3 = NULL;
1777 }
1778 }
1779
1780 /*
1781 * Check TLB page table level access flags.
1782 */
1783 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1784 {
1785 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1786 {
1787 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1788 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1789 }
1790 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1791 {
1792 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1793 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1794 }
1795 }
1796
1797# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1798 /*
1799 * Allow interpretation of patch manager code blocks since they can for
1800 * instance throw #PFs for perfectly good reasons.
1801 */
1802 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1803 { /* not unlikely */ }
1804 else
1805 {
1806 /** @todo Could optimize this a little in ring-3 if we liked. */
1807 size_t cbRead = 0;
1808 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1809 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1810 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1811 return;
1812 }
1813# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1814
1815 /*
1816 * Look up the physical page info if necessary.
1817 */
1818 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1819 { /* not necessary */ }
1820 else
1821 {
1822 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1823 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1824 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1825 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1826 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1827 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1828 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1829 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1830 }
1831
1832# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1833 /*
1834 * Try do a direct read using the pbMappingR3 pointer.
1835 */
1836 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1837 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1838 {
1839 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1840 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
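 /* Cap the decode window at 15 bytes, the architectural maximum length of an
    x86 instruction. */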
1841 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1842 {
1843 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1844 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1845 }
1846 else
1847 {
1848 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1849 Assert(cbInstr < cbMaxRead);
1850 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1851 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1852 }
1853 if (cbDst <= cbMaxRead)
1854 {
1855 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1856 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1857 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1858 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1859 return;
1860 }
1861 pVCpu->iem.s.pbInstrBuf = NULL;
1862
1863 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1864 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1865 }
1866 else
1867# endif
1868#if 0
1869 /*
1870 * If there is no special read handling, we can read a bit more and
1871 * put it in the prefetch buffer.
1872 */
1873 if ( cbDst < cbMaxRead
1874 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1875 {
1876 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1877 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1878 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1879 { /* likely */ }
1880 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1881 {
1882 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1883 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1884 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1885 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1886 }
1887 else
1888 {
1889 Log((RT_SUCCESS(rcStrict)
1890 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1891 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1892 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1893 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1894 }
1895 }
1896 /*
1897 * Special read handling, so only read exactly what's needed.
1898 * This is a highly unlikely scenario.
1899 */
1900 else
1901#endif
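 /* Read only the bytes actually needed via PGMPhysRead; this slow path is taken
    e.g. when fetching from a page without a direct ring-3 mapping (MMIO). */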
1902 {
1903 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1904 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1905 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1906 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1907 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1908 { /* likely */ }
1909 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1910 {
1911 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1912 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1913 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1914 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1915 }
1916 else
1917 {
1918 Log((RT_SUCCESS(rcStrict)
1919 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1920 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1921 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1922 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1923 }
1924 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1925 if (cbToRead == cbDst)
1926 return;
1927 }
1928
1929 /*
1930 * More to read, loop.
1931 */
1932 cbDst -= cbMaxRead;
1933 pvDst = (uint8_t *)pvDst + cbMaxRead;
1934 }
1935#else
1936 RT_NOREF(pvDst, cbDst);
1937 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1938#endif
1939}
1940
1941#else
1942
1943/**
1944 * Tries to fetch at least @a cbMin more opcode bytes, raising the
1945 * appropriate exception if it fails.
1946 *
1947 * @returns Strict VBox status code.
1948 * @param pVCpu The cross context virtual CPU structure of the
1949 * calling thread.
1950 * @param cbMin The minimum number of bytes relative to offOpcode
1951 * that must be read.
1952 */
1953IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1954{
1955 /*
1956 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1957 *
1958 * First translate CS:rIP to a physical address.
1959 */
1960 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1961 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1962 uint32_t cbToTryRead;
1963 RTGCPTR GCPtrNext;
1964 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1965 {
1966 cbToTryRead = PAGE_SIZE;
1967 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1968 if (!IEM_IS_CANONICAL(GCPtrNext))
1969 return iemRaiseGeneralProtectionFault0(pVCpu);
1970 }
1971 else
1972 {
1973 uint32_t GCPtrNext32 = pCtx->eip;
1974 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1975 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1976 if (GCPtrNext32 > pCtx->cs.u32Limit)
1977 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1978 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1979 if (!cbToTryRead) /* overflowed */
1980 {
1981 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1982 cbToTryRead = UINT32_MAX;
1983 /** @todo check out wrapping around the code segment. */
1984 }
1985 if (cbToTryRead < cbMin - cbLeft)
1986 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1987 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1988 }
1989
1990 /* Only read up to the end of the page, and make sure we don't read more
1991 than the opcode buffer can hold. */
1992 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1993 if (cbToTryRead > cbLeftOnPage)
1994 cbToTryRead = cbLeftOnPage;
1995 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1996 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1997/** @todo r=bird: Convert assertion into undefined opcode exception? */
1998 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1999
2000# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2001 /* Allow interpretation of patch manager code blocks since they can for
2002 instance throw #PFs for perfectly good reasons. */
2003 if (pVCpu->iem.s.fInPatchCode)
2004 {
2005 size_t cbRead = 0;
2006 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2007 AssertRCReturn(rc, rc);
2008 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2009 return VINF_SUCCESS;
2010 }
2011# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2012
2013 RTGCPHYS GCPhys;
2014 uint64_t fFlags;
2015 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2016 if (RT_FAILURE(rc))
2017 {
2018 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2019 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2020 }
2021 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2022 {
2023 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2024 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2025 }
2026 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2027 {
2028 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2029 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2030 }
2031 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2032 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2033 /** @todo Check reserved bits and such stuff. PGM is better at doing
2034 * that, so do it when implementing the guest virtual address
2035 * TLB... */
2036
2037 /*
2038 * Read the bytes at this address.
2039 *
2040 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2041 * and since PATM should only patch the start of an instruction there
2042 * should be no need to check again here.
2043 */
2044 if (!pVCpu->iem.s.fBypassHandlers)
2045 {
2046 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2047 cbToTryRead, PGMACCESSORIGIN_IEM);
2048 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2049 { /* likely */ }
2050 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2051 {
2052 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2053 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2054 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2055 }
2056 else
2057 {
2058 Log((RT_SUCCESS(rcStrict)
2059 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2060 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2061 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2062 return rcStrict;
2063 }
2064 }
2065 else
2066 {
2067 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2068 if (RT_SUCCESS(rc))
2069 { /* likely */ }
2070 else
2071 {
2072 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2073 return rc;
2074 }
2075 }
2076 pVCpu->iem.s.cbOpcode += cbToTryRead;
2077 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2078
2079 return VINF_SUCCESS;
2080}
2081
2082#endif /* !IEM_WITH_CODE_TLB */
2083#ifndef IEM_WITH_SETJMP
2084
2085/**
2086 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2087 *
2088 * @returns Strict VBox status code.
2089 * @param pVCpu The cross context virtual CPU structure of the
2090 * calling thread.
2091 * @param pb Where to return the opcode byte.
2092 */
2093DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2094{
2095 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2096 if (rcStrict == VINF_SUCCESS)
2097 {
2098 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2099 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2100 pVCpu->iem.s.offOpcode = offOpcode + 1;
2101 }
2102 else
2103 *pb = 0;
2104 return rcStrict;
2105}
2106
2107
2108/**
2109 * Fetches the next opcode byte.
2110 *
2111 * @returns Strict VBox status code.
2112 * @param pVCpu The cross context virtual CPU structure of the
2113 * calling thread.
2114 * @param pu8 Where to return the opcode byte.
2115 */
2116DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2117{
2118 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2119 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2120 {
2121 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2122 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2123 return VINF_SUCCESS;
2124 }
2125 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2126}
2127
2128#else /* IEM_WITH_SETJMP */
2129
2130/**
2131 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2132 *
2133 * @returns The opcode byte.
2134 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2135 */
2136DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2137{
2138# ifdef IEM_WITH_CODE_TLB
2139 uint8_t u8;
2140 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2141 return u8;
2142# else
2143 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2144 if (rcStrict == VINF_SUCCESS)
2145 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2146 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2147# endif
2148}
2149
2150
2151/**
2152 * Fetches the next opcode byte, longjmp on error.
2153 *
2154 * @returns The opcode byte.
2155 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2156 */
2157DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2158{
2159# ifdef IEM_WITH_CODE_TLB
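 /* Fast path: take the byte straight from the instruction buffer mapped by
    iemOpcodeFetchBytesJmp; otherwise fall back to the slow fetch below. */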
2160 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2161 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2162 if (RT_LIKELY( pbBuf != NULL
2163 && offBuf < pVCpu->iem.s.cbInstrBuf))
2164 {
2165 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2166 return pbBuf[offBuf];
2167 }
2168# else
2169 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2170 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2171 {
2172 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2173 return pVCpu->iem.s.abOpcode[offOpcode];
2174 }
2175# endif
2176 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2177}
2178
2179#endif /* IEM_WITH_SETJMP */
2180
2181/**
2182 * Fetches the next opcode byte, returns automatically on failure.
2183 *
2184 * @param a_pu8 Where to return the opcode byte.
2185 * @remark Implicitly references pVCpu.
2186 */
2187#ifndef IEM_WITH_SETJMP
2188# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2189 do \
2190 { \
2191 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2192 if (rcStrict2 == VINF_SUCCESS) \
2193 { /* likely */ } \
2194 else \
2195 return rcStrict2; \
2196 } while (0)
2197#else
2198# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2199#endif /* IEM_WITH_SETJMP */
2200
2201
2202#ifndef IEM_WITH_SETJMP
2203/**
2204 * Fetches the next signed byte from the opcode stream.
2205 *
2206 * @returns Strict VBox status code.
2207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2208 * @param pi8 Where to return the signed byte.
2209 */
2210DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2211{
2212 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2213}
2214#endif /* !IEM_WITH_SETJMP */
2215
2216
2217/**
2218 * Fetches the next signed byte from the opcode stream, returning automatically
2219 * on failure.
2220 *
2221 * @param a_pi8 Where to return the signed byte.
2222 * @remark Implicitly references pVCpu.
2223 */
2224#ifndef IEM_WITH_SETJMP
2225# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2226 do \
2227 { \
2228 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2229 if (rcStrict2 != VINF_SUCCESS) \
2230 return rcStrict2; \
2231 } while (0)
2232#else /* IEM_WITH_SETJMP */
2233# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2234
2235#endif /* IEM_WITH_SETJMP */
2236
2237#ifndef IEM_WITH_SETJMP
2238
2239/**
2240 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2241 *
2242 * @returns Strict VBox status code.
2243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2244 * @param pu16 Where to return the opcode word.
2245 */
2246DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2247{
2248 uint8_t u8;
2249 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2250 if (rcStrict == VINF_SUCCESS)
2251 *pu16 = (int8_t)u8;
2252 return rcStrict;
2253}
2254
2255
2256/**
2257 * Fetches the next signed byte from the opcode stream, extending it to
2258 * unsigned 16-bit.
2259 *
2260 * @returns Strict VBox status code.
2261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2262 * @param pu16 Where to return the unsigned word.
2263 */
2264DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2265{
2266 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2267 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2268 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2269
2270 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2271 pVCpu->iem.s.offOpcode = offOpcode + 1;
2272 return VINF_SUCCESS;
2273}
2274
2275#endif /* !IEM_WITH_SETJMP */
2276
2277/**
2278 * Fetches the next signed byte from the opcode stream, sign-extending it to
2279 * a word and returning automatically on failure.
2280 *
2281 * @param a_pu16 Where to return the word.
2282 * @remark Implicitly references pVCpu.
2283 */
2284#ifndef IEM_WITH_SETJMP
2285# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2286 do \
2287 { \
2288 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2289 if (rcStrict2 != VINF_SUCCESS) \
2290 return rcStrict2; \
2291 } while (0)
2292#else
2293# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2294#endif
2295
2296#ifndef IEM_WITH_SETJMP
2297
2298/**
2299 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2300 *
2301 * @returns Strict VBox status code.
2302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2303 * @param pu32 Where to return the opcode dword.
2304 */
2305DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2306{
2307 uint8_t u8;
2308 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2309 if (rcStrict == VINF_SUCCESS)
2310 *pu32 = (int8_t)u8;
2311 return rcStrict;
2312}
2313
2314
2315/**
2316 * Fetches the next signed byte from the opcode stream, extending it to
2317 * unsigned 32-bit.
2318 *
2319 * @returns Strict VBox status code.
2320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2321 * @param pu32 Where to return the unsigned dword.
2322 */
2323DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2324{
2325 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2326 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2327 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2328
2329 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2330 pVCpu->iem.s.offOpcode = offOpcode + 1;
2331 return VINF_SUCCESS;
2332}
2333
2334#endif /* !IEM_WITH_SETJMP */
2335
2336/**
2337 * Fetches the next signed byte from the opcode stream, sign-extending it to
2338 * a double word and returning automatically on failure.
2339 *
2340 * @param a_pu32 Where to return the double word.
2341 * @remark Implicitly references pVCpu.
2342 */
2343#ifndef IEM_WITH_SETJMP
2344# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2345 do \
2346 { \
2347 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2348 if (rcStrict2 != VINF_SUCCESS) \
2349 return rcStrict2; \
2350 } while (0)
2351#else
2352# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2353#endif
2354
2355#ifndef IEM_WITH_SETJMP
2356
2357/**
2358 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2359 *
2360 * @returns Strict VBox status code.
2361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2362 * @param pu64 Where to return the opcode qword.
2363 */
2364DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2365{
2366 uint8_t u8;
2367 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2368 if (rcStrict == VINF_SUCCESS)
2369 *pu64 = (int8_t)u8;
2370 return rcStrict;
2371}
2372
2373
2374/**
2375 * Fetches the next signed byte from the opcode stream, extending it to
2376 * unsigned 64-bit.
2377 *
2378 * @returns Strict VBox status code.
2379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2380 * @param pu64 Where to return the unsigned qword.
2381 */
2382DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2383{
2384 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2385 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2386 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2387
2388 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2389 pVCpu->iem.s.offOpcode = offOpcode + 1;
2390 return VINF_SUCCESS;
2391}
2392
2393#endif /* !IEM_WITH_SETJMP */
2394
2395
2396/**
2397 * Fetches the next signed byte from the opcode stream, sign-extending it to
2398 * a quad word and returning automatically on failure.
2399 *
2400 * @param a_pu64 Where to return the quad word.
2401 * @remark Implicitly references pVCpu.
2402 */
2403#ifndef IEM_WITH_SETJMP
2404# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2405 do \
2406 { \
2407 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2408 if (rcStrict2 != VINF_SUCCESS) \
2409 return rcStrict2; \
2410 } while (0)
2411#else
2412# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2413#endif
2414
2415
2416#ifndef IEM_WITH_SETJMP
2417
2418/**
2419 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2420 *
2421 * @returns Strict VBox status code.
2422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2423 * @param pu16 Where to return the opcode word.
2424 */
2425DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2426{
2427 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2428 if (rcStrict == VINF_SUCCESS)
2429 {
2430 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2431# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2432 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2433# else
2434 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2435# endif
2436 pVCpu->iem.s.offOpcode = offOpcode + 2;
2437 }
2438 else
2439 *pu16 = 0;
2440 return rcStrict;
2441}
2442
2443
2444/**
2445 * Fetches the next opcode word.
2446 *
2447 * @returns Strict VBox status code.
2448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2449 * @param pu16 Where to return the opcode word.
2450 */
2451DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2452{
2453 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2454 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2455 {
2456 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2457# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2458 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2459# else
2460 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2461# endif
2462 return VINF_SUCCESS;
2463 }
2464 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2465}
2466
2467#else /* IEM_WITH_SETJMP */
2468
2469/**
2470 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2471 *
2472 * @returns The opcode word.
2473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2474 */
2475DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2476{
2477# ifdef IEM_WITH_CODE_TLB
2478 uint16_t u16;
2479 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2480 return u16;
2481# else
2482 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2483 if (rcStrict == VINF_SUCCESS)
2484 {
2485 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2486 pVCpu->iem.s.offOpcode += 2;
2487# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2488 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2489# else
2490 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2491# endif
2492 }
2493 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2494# endif
2495}
2496
2497
2498/**
2499 * Fetches the next opcode word, longjmp on error.
2500 *
2501 * @returns The opcode word.
2502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2503 */
2504DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2505{
2506# ifdef IEM_WITH_CODE_TLB
2507 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2508 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2509 if (RT_LIKELY( pbBuf != NULL
2510 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2511 {
2512 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2513# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2514 return *(uint16_t const *)&pbBuf[offBuf];
2515# else
2516 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2517# endif
2518 }
2519# else
2520 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2521 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2522 {
2523 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2524# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2525 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2526# else
2527 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2528# endif
2529 }
2530# endif
2531 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2532}
2533
2534#endif /* IEM_WITH_SETJMP */
2535
2536
2537/**
2538 * Fetches the next opcode word, returns automatically on failure.
2539 *
2540 * @param a_pu16 Where to return the opcode word.
2541 * @remark Implicitly references pVCpu.
2542 */
2543#ifndef IEM_WITH_SETJMP
2544# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2545 do \
2546 { \
2547 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2548 if (rcStrict2 != VINF_SUCCESS) \
2549 return rcStrict2; \
2550 } while (0)
2551#else
2552# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2553#endif
2554
2555#ifndef IEM_WITH_SETJMP
2556
2557/**
2558 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2559 *
2560 * @returns Strict VBox status code.
2561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2562 * @param pu32 Where to return the opcode double word.
2563 */
2564DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2565{
2566 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2567 if (rcStrict == VINF_SUCCESS)
2568 {
2569 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2570 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2571 pVCpu->iem.s.offOpcode = offOpcode + 2;
2572 }
2573 else
2574 *pu32 = 0;
2575 return rcStrict;
2576}
2577
2578
2579/**
2580 * Fetches the next opcode word, zero extending it to a double word.
2581 *
2582 * @returns Strict VBox status code.
2583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2584 * @param pu32 Where to return the opcode double word.
2585 */
2586DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2587{
2588 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2589 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2590 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2591
2592 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2593 pVCpu->iem.s.offOpcode = offOpcode + 2;
2594 return VINF_SUCCESS;
2595}
2596
2597#endif /* !IEM_WITH_SETJMP */
2598
2599
2600/**
2601 * Fetches the next opcode word and zero extends it to a double word, returns
2602 * automatically on failure.
2603 *
2604 * @param a_pu32 Where to return the opcode double word.
2605 * @remark Implicitly references pVCpu.
2606 */
2607#ifndef IEM_WITH_SETJMP
2608# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2609 do \
2610 { \
2611 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2612 if (rcStrict2 != VINF_SUCCESS) \
2613 return rcStrict2; \
2614 } while (0)
2615#else
2616# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2617#endif
2618
2619#ifndef IEM_WITH_SETJMP
2620
2621/**
2622 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2623 *
2624 * @returns Strict VBox status code.
2625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2626 * @param pu64 Where to return the opcode quad word.
2627 */
2628DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2629{
2630 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2631 if (rcStrict == VINF_SUCCESS)
2632 {
2633 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2634 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2635 pVCpu->iem.s.offOpcode = offOpcode + 2;
2636 }
2637 else
2638 *pu64 = 0;
2639 return rcStrict;
2640}
2641
2642
2643/**
2644 * Fetches the next opcode word, zero extending it to a quad word.
2645 *
2646 * @returns Strict VBox status code.
2647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2648 * @param pu64 Where to return the opcode quad word.
2649 */
2650DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2651{
2652 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2653 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2654 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2655
2656 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2657 pVCpu->iem.s.offOpcode = offOpcode + 2;
2658 return VINF_SUCCESS;
2659}
2660
2661#endif /* !IEM_WITH_SETJMP */
2662
2663/**
2664 * Fetches the next opcode word and zero extends it to a quad word, returns
2665 * automatically on failure.
2666 *
2667 * @param a_pu64 Where to return the opcode quad word.
2668 * @remark Implicitly references pVCpu.
2669 */
2670#ifndef IEM_WITH_SETJMP
2671# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2672 do \
2673 { \
2674 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2675 if (rcStrict2 != VINF_SUCCESS) \
2676 return rcStrict2; \
2677 } while (0)
2678#else
2679# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2680#endif
2681
2682
2683#ifndef IEM_WITH_SETJMP
2684/**
2685 * Fetches the next signed word from the opcode stream.
2686 *
2687 * @returns Strict VBox status code.
2688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2689 * @param pi16 Where to return the signed word.
2690 */
2691DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2692{
2693 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2694}
2695#endif /* !IEM_WITH_SETJMP */
2696
2697
2698/**
2699 * Fetches the next signed word from the opcode stream, returning automatically
2700 * on failure.
2701 *
2702 * @param a_pi16 Where to return the signed word.
2703 * @remark Implicitly references pVCpu.
2704 */
2705#ifndef IEM_WITH_SETJMP
2706# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2707 do \
2708 { \
2709 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2710 if (rcStrict2 != VINF_SUCCESS) \
2711 return rcStrict2; \
2712 } while (0)
2713#else
2714# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2715#endif
2716
2717#ifndef IEM_WITH_SETJMP
2718
2719/**
2720 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2721 *
2722 * @returns Strict VBox status code.
2723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2724 * @param pu32 Where to return the opcode dword.
2725 */
2726DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2727{
2728 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2729 if (rcStrict == VINF_SUCCESS)
2730 {
2731 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2732# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2733 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2734# else
2735 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2736 pVCpu->iem.s.abOpcode[offOpcode + 1],
2737 pVCpu->iem.s.abOpcode[offOpcode + 2],
2738 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2739# endif
2740 pVCpu->iem.s.offOpcode = offOpcode + 4;
2741 }
2742 else
2743 *pu32 = 0;
2744 return rcStrict;
2745}
2746
2747
2748/**
2749 * Fetches the next opcode dword.
2750 *
2751 * @returns Strict VBox status code.
2752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2753 * @param pu32 Where to return the opcode double word.
2754 */
2755DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2756{
2757 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2758 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2759 {
2760 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2761# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2762 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2763# else
2764 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2765 pVCpu->iem.s.abOpcode[offOpcode + 1],
2766 pVCpu->iem.s.abOpcode[offOpcode + 2],
2767 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2768# endif
2769 return VINF_SUCCESS;
2770 }
2771 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2772}
2773
2774#else /* IEM_WITH_SETJMP */
2775
2776/**
2777 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2778 *
2779 * @returns The opcode dword.
2780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2781 */
2782DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2783{
2784# ifdef IEM_WITH_CODE_TLB
2785 uint32_t u32;
2786 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2787 return u32;
2788# else
2789 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2790 if (rcStrict == VINF_SUCCESS)
2791 {
2792 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2793 pVCpu->iem.s.offOpcode = offOpcode + 4;
2794# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2795 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2796# else
2797 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2798 pVCpu->iem.s.abOpcode[offOpcode + 1],
2799 pVCpu->iem.s.abOpcode[offOpcode + 2],
2800 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2801# endif
2802 }
2803 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2804# endif
2805}
2806
2807
2808/**
2809 * Fetches the next opcode dword, longjmp on error.
2810 *
2811 * @returns The opcode dword.
2812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2813 */
2814DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2815{
2816# ifdef IEM_WITH_CODE_TLB
2817 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2818 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2819 if (RT_LIKELY( pbBuf != NULL
2820 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2821 {
2822 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2823# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2824 return *(uint32_t const *)&pbBuf[offBuf];
2825# else
2826 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2827 pbBuf[offBuf + 1],
2828 pbBuf[offBuf + 2],
2829 pbBuf[offBuf + 3]);
2830# endif
2831 }
2832# else
2833 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2834 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2835 {
2836 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2837# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2838 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2839# else
2840 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2841 pVCpu->iem.s.abOpcode[offOpcode + 1],
2842 pVCpu->iem.s.abOpcode[offOpcode + 2],
2843 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2844# endif
2845 }
2846# endif
2847 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2848}
2849
2850#endif /* !IEM_WITH_SETJMP */
2851
2852
2853/**
2854 * Fetches the next opcode dword, returns automatically on failure.
2855 *
2856 * @param a_pu32 Where to return the opcode dword.
2857 * @remark Implicitly references pVCpu.
2858 */
2859#ifndef IEM_WITH_SETJMP
2860# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2861 do \
2862 { \
2863 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2864 if (rcStrict2 != VINF_SUCCESS) \
2865 return rcStrict2; \
2866 } while (0)
2867#else
2868# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2869#endif
2870
2871#ifndef IEM_WITH_SETJMP
2872
2873/**
2874 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2875 *
2876 * @returns Strict VBox status code.
2877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2878 * @param pu64 Where to return the opcode dword.
2879 */
2880DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2881{
2882 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2883 if (rcStrict == VINF_SUCCESS)
2884 {
2885 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2886 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2887 pVCpu->iem.s.abOpcode[offOpcode + 1],
2888 pVCpu->iem.s.abOpcode[offOpcode + 2],
2889 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2890 pVCpu->iem.s.offOpcode = offOpcode + 4;
2891 }
2892 else
2893 *pu64 = 0;
2894 return rcStrict;
2895}
2896
2897
2898/**
2899 * Fetches the next opcode dword, zero extending it to a quad word.
2900 *
2901 * @returns Strict VBox status code.
2902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2903 * @param pu64 Where to return the opcode quad word.
2904 */
2905DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2906{
2907 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2908 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2909 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2910
2911 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2912 pVCpu->iem.s.abOpcode[offOpcode + 1],
2913 pVCpu->iem.s.abOpcode[offOpcode + 2],
2914 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2915 pVCpu->iem.s.offOpcode = offOpcode + 4;
2916 return VINF_SUCCESS;
2917}
2918
2919#endif /* !IEM_WITH_SETJMP */
2920
2921
2922/**
2923 * Fetches the next opcode dword and zero extends it to a quad word, returns
2924 * automatically on failure.
2925 *
2926 * @param a_pu64 Where to return the opcode quad word.
2927 * @remark Implicitly references pVCpu.
2928 */
2929#ifndef IEM_WITH_SETJMP
2930# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2931 do \
2932 { \
2933 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2934 if (rcStrict2 != VINF_SUCCESS) \
2935 return rcStrict2; \
2936 } while (0)
2937#else
2938# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2939#endif
2940
2941
2942#ifndef IEM_WITH_SETJMP
2943/**
2944 * Fetches the next signed double word from the opcode stream.
2945 *
2946 * @returns Strict VBox status code.
2947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2948 * @param pi32 Where to return the signed double word.
2949 */
2950DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2951{
2952 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2953}
2954#endif
2955
2956/**
2957 * Fetches the next signed double word from the opcode stream, returning
2958 * automatically on failure.
2959 *
2960 * @param a_pi32 Where to return the signed double word.
2961 * @remark Implicitly references pVCpu.
2962 */
2963#ifndef IEM_WITH_SETJMP
2964# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2965 do \
2966 { \
2967 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2968 if (rcStrict2 != VINF_SUCCESS) \
2969 return rcStrict2; \
2970 } while (0)
2971#else
2972# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2973#endif
2974
2975#ifndef IEM_WITH_SETJMP
2976
2977/**
2978 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2979 *
2980 * @returns Strict VBox status code.
2981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2982 * @param pu64 Where to return the opcode qword.
2983 */
2984DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2985{
2986 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2987 if (rcStrict == VINF_SUCCESS)
2988 {
2989 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2990 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2991 pVCpu->iem.s.abOpcode[offOpcode + 1],
2992 pVCpu->iem.s.abOpcode[offOpcode + 2],
2993 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2994 pVCpu->iem.s.offOpcode = offOpcode + 4;
2995 }
2996 else
2997 *pu64 = 0;
2998 return rcStrict;
2999}
3000
3001
3002/**
3003 * Fetches the next opcode dword, sign extending it into a quad word.
3004 *
3005 * @returns Strict VBox status code.
3006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3007 * @param pu64 Where to return the opcode quad word.
3008 */
3009DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3010{
3011 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3012 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3013 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3014
3015 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3016 pVCpu->iem.s.abOpcode[offOpcode + 1],
3017 pVCpu->iem.s.abOpcode[offOpcode + 2],
3018 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3019 *pu64 = i32;
3020 pVCpu->iem.s.offOpcode = offOpcode + 4;
3021 return VINF_SUCCESS;
3022}
3023
3024#endif /* !IEM_WITH_SETJMP */
3025
3026
3027/**
3028 * Fetches the next opcode double word and sign extends it to a quad word,
3029 * returns automatically on failure.
3030 *
3031 * @param a_pu64 Where to return the opcode quad word.
3032 * @remark Implicitly references pVCpu.
3033 */
3034#ifndef IEM_WITH_SETJMP
3035# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3036 do \
3037 { \
3038 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3039 if (rcStrict2 != VINF_SUCCESS) \
3040 return rcStrict2; \
3041 } while (0)
3042#else
3043# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3044#endif
3045
3046#ifndef IEM_WITH_SETJMP
3047
3048/**
3049 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3050 *
3051 * @returns Strict VBox status code.
3052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3053 * @param pu64 Where to return the opcode qword.
3054 */
3055DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3056{
3057 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3058 if (rcStrict == VINF_SUCCESS)
3059 {
3060 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3061# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3062 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3063# else
3064 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3065 pVCpu->iem.s.abOpcode[offOpcode + 1],
3066 pVCpu->iem.s.abOpcode[offOpcode + 2],
3067 pVCpu->iem.s.abOpcode[offOpcode + 3],
3068 pVCpu->iem.s.abOpcode[offOpcode + 4],
3069 pVCpu->iem.s.abOpcode[offOpcode + 5],
3070 pVCpu->iem.s.abOpcode[offOpcode + 6],
3071 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3072# endif
3073 pVCpu->iem.s.offOpcode = offOpcode + 8;
3074 }
3075 else
3076 *pu64 = 0;
3077 return rcStrict;
3078}
3079
3080
3081/**
3082 * Fetches the next opcode qword.
3083 *
3084 * @returns Strict VBox status code.
3085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3086 * @param pu64 Where to return the opcode qword.
3087 */
3088DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3089{
3090 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3091 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3092 {
3093# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3094 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3095# else
3096 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3097 pVCpu->iem.s.abOpcode[offOpcode + 1],
3098 pVCpu->iem.s.abOpcode[offOpcode + 2],
3099 pVCpu->iem.s.abOpcode[offOpcode + 3],
3100 pVCpu->iem.s.abOpcode[offOpcode + 4],
3101 pVCpu->iem.s.abOpcode[offOpcode + 5],
3102 pVCpu->iem.s.abOpcode[offOpcode + 6],
3103 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3104# endif
3105 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3106 return VINF_SUCCESS;
3107 }
3108 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3109}
3110
3111#else /* IEM_WITH_SETJMP */
3112
3113/**
3114 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3115 *
3116 * @returns The opcode qword.
3117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3118 */
3119DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3120{
3121# ifdef IEM_WITH_CODE_TLB
3122 uint64_t u64;
3123 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3124 return u64;
3125# else
3126 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3127 if (rcStrict == VINF_SUCCESS)
3128 {
3129 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3130 pVCpu->iem.s.offOpcode = offOpcode + 8;
3131# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3132 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3133# else
3134 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3135 pVCpu->iem.s.abOpcode[offOpcode + 1],
3136 pVCpu->iem.s.abOpcode[offOpcode + 2],
3137 pVCpu->iem.s.abOpcode[offOpcode + 3],
3138 pVCpu->iem.s.abOpcode[offOpcode + 4],
3139 pVCpu->iem.s.abOpcode[offOpcode + 5],
3140 pVCpu->iem.s.abOpcode[offOpcode + 6],
3141 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3142# endif
3143 }
3144 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3145# endif
3146}
3147
3148
3149/**
3150 * Fetches the next opcode qword, longjmp on error.
3151 *
3152 * @returns The opcode qword.
3153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3154 */
3155DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3156{
3157# ifdef IEM_WITH_CODE_TLB
3158 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3159 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3160 if (RT_LIKELY( pbBuf != NULL
3161 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3162 {
3163 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3164# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3165 return *(uint64_t const *)&pbBuf[offBuf];
3166# else
3167 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3168 pbBuf[offBuf + 1],
3169 pbBuf[offBuf + 2],
3170 pbBuf[offBuf + 3],
3171 pbBuf[offBuf + 4],
3172 pbBuf[offBuf + 5],
3173 pbBuf[offBuf + 6],
3174 pbBuf[offBuf + 7]);
3175# endif
3176 }
3177# else
3178 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3179 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3180 {
3181 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3182# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3183 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3184# else
3185 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3186 pVCpu->iem.s.abOpcode[offOpcode + 1],
3187 pVCpu->iem.s.abOpcode[offOpcode + 2],
3188 pVCpu->iem.s.abOpcode[offOpcode + 3],
3189 pVCpu->iem.s.abOpcode[offOpcode + 4],
3190 pVCpu->iem.s.abOpcode[offOpcode + 5],
3191 pVCpu->iem.s.abOpcode[offOpcode + 6],
3192 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3193# endif
3194 }
3195# endif
3196 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3197}
3198
3199#endif /* IEM_WITH_SETJMP */
3200
3201/**
3202 * Fetches the next opcode quad word, returns automatically on failure.
3203 *
3204 * @param a_pu64 Where to return the opcode quad word.
3205 * @remark Implicitly references pVCpu.
3206 */
3207#ifndef IEM_WITH_SETJMP
3208# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3209 do \
3210 { \
3211 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3212 if (rcStrict2 != VINF_SUCCESS) \
3213 return rcStrict2; \
3214 } while (0)
3215#else
3216# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3217#endif
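
/*
 * Illustrative sketch (not part of the build): what the byte-wise
 * RT_MAKE_U64_FROM_U8 path above computes.  Opcode immediates are little
 * endian, so byte 0 ends up in bits 0..7 and byte 7 in bits 56..63.  The
 * function name is a hypothetical example.
 *
 *     #include <stdint.h>
 *
 *     static uint64_t exampleMakeU64FromBytes(const uint8_t *pb)
 *     {
 *         uint64_t u64 = 0;
 *         for (unsigned i = 0; i < 8; i++)
 *             u64 |= (uint64_t)pb[i] << (i * 8);
 *         return u64;
 *     }
 */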
3218
3219
3220/** @name Misc Worker Functions.
3221 * @{
3222 */
3223
3224/**
3225 * Gets the exception class for the specified exception vector.
3226 *
3227 * @returns The class of the specified exception.
3228 * @param uVector The exception vector.
3229 */
3230IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3231{
3232 Assert(uVector <= X86_XCPT_LAST);
3233 switch (uVector)
3234 {
3235 case X86_XCPT_DE:
3236 case X86_XCPT_TS:
3237 case X86_XCPT_NP:
3238 case X86_XCPT_SS:
3239 case X86_XCPT_GP:
3240 case X86_XCPT_SX: /* AMD only */
3241 return IEMXCPTCLASS_CONTRIBUTORY;
3242
3243 case X86_XCPT_PF:
3244 case X86_XCPT_VE: /* Intel only */
3245 return IEMXCPTCLASS_PAGE_FAULT;
3246 }
3247 return IEMXCPTCLASS_BENIGN;
3248}
3249
3250
3251/**
3252 * Evaluates how to handle an exception caused during delivery of another event
3253 * (exception / interrupt).
3254 *
3255 * @returns How to handle the recursive exception.
3256 * @param pVCpu The cross context virtual CPU structure of the
3257 * calling thread.
3258 * @param fPrevFlags The flags of the previous event.
3259 * @param uPrevVector The vector of the previous event.
3260 * @param fCurFlags The flags of the current exception.
3261 * @param uCurVector The vector of the current exception.
3262 * @param pfXcptRaiseInfo Where to store additional information about the
3263 * exception condition. Optional.
3264 */
3265VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3266 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3267{
3268 /*
3269 * Only CPU exceptions can be raised while delivering other events; software interrupt
3270 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3271 */
3272 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3273 Assert(pVCpu); RT_NOREF(pVCpu);
3274
3275 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3276 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3277 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3278 {
3279 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3280 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3281 {
3282 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3283 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3284 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3285 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3286 {
3287 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3288 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3289 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3290 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3291 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3292 }
3293 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3294 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3295 {
3296 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3297 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%u uCurVector=%u -> #DF\n", uPrevVector, uCurVector));
3298 }
3299 else if ( uPrevVector == X86_XCPT_DF
3300 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3301 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3302 {
3303 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3304 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3305 }
3306 }
3307 else
3308 {
3309 if (uPrevVector == X86_XCPT_NMI)
3310 {
3311 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3312 if (uCurVector == X86_XCPT_PF)
3313 {
3314 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3315 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3316 }
3317 }
3318 else if ( uPrevVector == X86_XCPT_AC
3319 && uCurVector == X86_XCPT_AC)
3320 {
3321 enmRaise = IEMXCPTRAISE_CPU_HANG;
3322 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3323 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3324 }
3325 }
3326 }
3327 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3328 {
3329 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3330 if (uCurVector == X86_XCPT_PF)
3331 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3332 }
3333 else
3334 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3335
3336 if (pfXcptRaiseInfo)
3337 *pfXcptRaiseInfo = fRaiseInfo;
3338 return enmRaise;
3339}
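
/*
 * Illustrative sketch (not part of the build): the benign / contributory /
 * page-fault combination rules that IEMEvaluateRecursiveXcpt implements for
 * the #DF promotion (see the Intel SDM on double-fault conditions).  The
 * enum and function names are hypothetical stand-ins.
 *
 *     typedef enum { EXAMPLE_BENIGN, EXAMPLE_CONTRIBUTORY, EXAMPLE_PAGE_FAULT } EXAMPLEXCPTCLASS;
 *
 *     // Returns 1 when raising enmCur while delivering enmPrev must become a double fault.
 *     static int exampleIsDoubleFault(EXAMPLEXCPTCLASS enmPrev, EXAMPLEXCPTCLASS enmCur)
 *     {
 *         if (enmPrev == EXAMPLE_CONTRIBUTORY && enmCur == EXAMPLE_CONTRIBUTORY)
 *             return 1;
 *         if (   enmPrev == EXAMPLE_PAGE_FAULT
 *             && (enmCur == EXAMPLE_PAGE_FAULT || enmCur == EXAMPLE_CONTRIBUTORY))
 *             return 1;
 *         return 0;   // benign combinations are delivered as a normal (current) exception
 *     }
 */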
3340
3341
3342/**
3343 * Enters the CPU shutdown state initiated by a triple fault or other
3344 * unrecoverable conditions.
3345 *
3346 * @returns Strict VBox status code.
3347 * @param pVCpu The cross context virtual CPU structure of the
3348 * calling thread.
3349 */
3350IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3351{
3352 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3353 {
3354 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3355 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3356 }
3357
3358 RT_NOREF(pVCpu);
3359 return VINF_EM_TRIPLE_FAULT;
3360}
3361
3362
3363#ifdef VBOX_WITH_NESTED_HWVIRT
3364IEM_STATIC VBOXSTRICTRC iemHandleSvmNstGstEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
3365 uint32_t uErr, uint64_t uCr2)
3366{
3367 Assert(IEM_IS_SVM_ENABLED(pVCpu));
3368
3369 /*
3370 * Handle nested-guest SVM exception and software interrupt intercepts,
3371 * see AMD spec. 15.12 "Exception Intercepts".
3372 *
3373 * - NMI intercepts have their own exit code and do not cause SVM_EXIT_EXCEPTION_2 #VMEXITs.
3374 * - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
3375 * even when they use a vector in the range 0 to 31.
3376 * - ICEBP should not trigger #DB intercept, but its own intercept.
3377 * - For #PF exceptions, its intercept is checked before CR2 is written by the exception.
3378 * - For #PF exceptions, the #PF intercept is checked before CR2 is written by the exception.
3379 /* Check NMI intercept */
3380 if ( u8Vector == X86_XCPT_NMI
3381 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3382 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
3383 {
3384 Log2(("iemHandleSvmNstGstEventIntercept: NMI intercept -> #VMEXIT\n"));
3385 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3386 }
3387
3388 /* Check ICEBP intercept. */
3389 if ( (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
3390 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_ICEBP))
3391 {
3392 Log2(("iemHandleSvmNstGstEventIntercept: ICEBP intercept -> #VMEXIT\n"));
3393 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_ICEBP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3394 }
3395
3396 /* Check CPU exception intercepts. */
3397 if ( (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3398 && IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, u8Vector))
3399 {
3400 Assert(u8Vector <= X86_XCPT_LAST);
3401 uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
3402 uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
3403 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist
3404 && u8Vector == X86_XCPT_PF
3405 && !(uErr & X86_TRAP_PF_ID))
3406 {
3407 /** @todo Nested-guest SVM - figure out fetching op-code bytes from IEM. */
3408#ifdef IEM_WITH_CODE_TLB
3409#else
3410 uint8_t const offOpCode = pVCpu->iem.s.offOpcode;
3411 uint8_t const cbCurrent = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode;
3412 if ( cbCurrent > 0
3413 && cbCurrent < sizeof(pCtx->hwvirt.svm.VmcbCtrl.abInstr))
3414 {
3415 Assert(cbCurrent <= sizeof(pVCpu->iem.s.abOpcode));
3416 memcpy(&pCtx->hwvirt.svm.VmcbCtrl.abInstr[0], &pVCpu->iem.s.abOpcode[offOpCode], cbCurrent);
3417 }
3418#endif
3419 }
3420 Log2(("iemHandleSvmNstGstEventIntercept: Xcpt intercept. u8Vector=%#x uExitInfo1=%#RX64, uExitInfo2=%#RX64 -> #VMEXIT\n",
3421 u8Vector, uExitInfo1, uExitInfo2));
3422 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + u8Vector, uExitInfo1, uExitInfo2);
3423 }
3424
3425 /* Check software interrupt (INTn) intercepts. */
3426 if ( (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3427 | IEM_XCPT_FLAGS_BP_INSTR
3428 | IEM_XCPT_FLAGS_ICEBP_INSTR
3429 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3430 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN))
3431 {
3432 uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? u8Vector : 0;
3433 Log2(("iemHandleSvmNstGstEventIntercept: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
3434 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
3435 }
3436
3437 return VINF_HM_INTERCEPT_NOT_ACTIVE;
3438}
3439#endif
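
/*
 * Illustrative sketch (not part of the build): how the exception intercept
 * #VMEXIT parameters above are put together - one exit code per vector, the
 * error code in exit info 1 and CR2 in exit info 2 when applicable.  The
 * EXAMPLE_* names and the 0x40 base are illustrative stand-ins for the real
 * SVM definitions.
 *
 *     #include <stdint.h>
 *
 *     #define EXAMPLE_SVM_EXIT_EXCEPTION_0  0x40
 *     typedef struct { uint64_t uExitCode, uExitInfo1, uExitInfo2; } EXAMPLEXCPTEXIT;
 *
 *     static EXAMPLEXCPTEXIT exampleBuildXcptExit(uint8_t u8Vector, int fHasErr, uint32_t uErr,
 *                                                 int fHasCr2, uint64_t uCr2)
 *     {
 *         EXAMPLEXCPTEXIT Exit;
 *         Exit.uExitCode  = EXAMPLE_SVM_EXIT_EXCEPTION_0 + u8Vector;
 *         Exit.uExitInfo1 = fHasErr ? uErr : 0;
 *         Exit.uExitInfo2 = fHasCr2 ? uCr2 : 0;
 *         return Exit;
 *     }
 */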
3440
3441/**
3442 * Validates a new SS segment.
3443 *
3444 * @returns VBox strict status code.
3445 * @param pVCpu The cross context virtual CPU structure of the
3446 * calling thread.
3447 * @param pCtx The CPU context.
3448 * @param NewSS The new SS selector.
3449 * @param uCpl The CPL to load the stack for.
3450 * @param pDesc Where to return the descriptor.
3451 */
3452IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3453{
3454 NOREF(pCtx);
3455
3456 /* Null selectors are not allowed (we're not called for dispatching
3457 interrupts with SS=0 in long mode). */
3458 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3459 {
3460 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3461 return iemRaiseTaskSwitchFault0(pVCpu);
3462 }
3463
3464 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3465 if ((NewSS & X86_SEL_RPL) != uCpl)
3466 {
3467 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3468 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3469 }
3470
3471 /*
3472 * Read the descriptor.
3473 */
3474 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3475 if (rcStrict != VINF_SUCCESS)
3476 return rcStrict;
3477
3478 /*
3479 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3480 */
3481 if (!pDesc->Legacy.Gen.u1DescType)
3482 {
3483 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3484 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3485 }
3486
3487 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3488 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3489 {
3490 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3491 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3492 }
3493 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3494 {
3495 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3496 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3497 }
3498
3499 /* Is it there? */
3500 /** @todo testcase: Is this checked before the canonical / limit check below? */
3501 if (!pDesc->Legacy.Gen.u1Present)
3502 {
3503 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3504 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3505 }
3506
3507 return VINF_SUCCESS;
3508}
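
/*
 * Illustrative sketch (not part of the build): the descriptor type checks
 * iemMiscValidateNewSS performs, distilled to the x86 descriptor type bits
 * (bit 3 set = code segment, bit 1 set = writable for data segments).  The
 * RPL and present checks are done separately above.  The function name is a
 * hypothetical example.
 *
 *     #include <stdint.h>
 *
 *     static int exampleIsAcceptableSs(uint8_t u1DescType, uint8_t u4Type, uint8_t u2Dpl, uint8_t uCpl)
 *     {
 *         if (!u1DescType)        return 0;   // system descriptor
 *         if (u4Type & 0x8)       return 0;   // code segment - must be data
 *         if (!(u4Type & 0x2))    return 0;   // not writable
 *         return u2Dpl == uCpl;               // DPL must equal the CPL being loaded
 *     }
 */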
3509
3510
3511/**
3512 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3513 * not.
3514 *
3515 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3516 * @param a_pCtx The CPU context.
3517 */
3518#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3519# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3520 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3521 ? (a_pCtx)->eflags.u \
3522 : CPUMRawGetEFlags(a_pVCpu) )
3523#else
3524# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3525 ( (a_pCtx)->eflags.u )
3526#endif
3527
3528/**
3529 * Updates the EFLAGS in the correct manner wrt. PATM.
3530 *
3531 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3532 * @param a_pCtx The CPU context.
3533 * @param a_fEfl The new EFLAGS.
3534 */
3535#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3536# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3537 do { \
3538 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3539 (a_pCtx)->eflags.u = (a_fEfl); \
3540 else \
3541 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3542 } while (0)
3543#else
3544# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3545 do { \
3546 (a_pCtx)->eflags.u = (a_fEfl); \
3547 } while (0)
3548#endif
3549
3550
3551/** @} */
3552
3553/** @name Raising Exceptions.
3554 *
3555 * @{
3556 */
3557
3558
3559/**
3560 * Loads the specified stack far pointer from the TSS.
3561 *
3562 * @returns VBox strict status code.
3563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3564 * @param pCtx The CPU context.
3565 * @param uCpl The CPL to load the stack for.
3566 * @param pSelSS Where to return the new stack segment.
3567 * @param puEsp Where to return the new stack pointer.
3568 */
3569IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3570 PRTSEL pSelSS, uint32_t *puEsp)
3571{
3572 VBOXSTRICTRC rcStrict;
3573 Assert(uCpl < 4);
3574
3575 switch (pCtx->tr.Attr.n.u4Type)
3576 {
3577 /*
3578 * 16-bit TSS (X86TSS16).
3579 */
3580 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); /* fall thru */
3581 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3582 {
3583 uint32_t off = uCpl * 4 + 2;
3584 if (off + 4 <= pCtx->tr.u32Limit)
3585 {
3586 /** @todo check actual access pattern here. */
3587 uint32_t u32Tmp = 0; /* gcc maybe... */
3588 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3589 if (rcStrict == VINF_SUCCESS)
3590 {
3591 *puEsp = RT_LOWORD(u32Tmp);
3592 *pSelSS = RT_HIWORD(u32Tmp);
3593 return VINF_SUCCESS;
3594 }
3595 }
3596 else
3597 {
3598 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3599 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3600 }
3601 break;
3602 }
3603
3604 /*
3605 * 32-bit TSS (X86TSS32).
3606 */
3607 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); /* fall thru */
3608 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3609 {
3610 uint32_t off = uCpl * 8 + 4;
3611 if (off + 7 <= pCtx->tr.u32Limit)
3612 {
3613/** @todo check actual access pattern here. */
3614 uint64_t u64Tmp;
3615 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3616 if (rcStrict == VINF_SUCCESS)
3617 {
3618 *puEsp = u64Tmp & UINT32_MAX;
3619 *pSelSS = (RTSEL)(u64Tmp >> 32);
3620 return VINF_SUCCESS;
3621 }
3622 }
3623 else
3624 {
3625 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3626 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3627 }
3628 break;
3629 }
3630
3631 default:
3632 AssertFailed();
3633 rcStrict = VERR_IEM_IPE_4;
3634 break;
3635 }
3636
3637 *puEsp = 0; /* make gcc happy */
3638 *pSelSS = 0; /* make gcc happy */
3639 return rcStrict;
3640}
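
/*
 * Illustrative sketch (not part of the build): the offsets read above.  A
 * 16-bit TSS keeps 4-byte sp:ss pairs starting at offset 2, a 32-bit TSS
 * keeps 8-byte esp/ss pairs starting at offset 4, one pair per privilege
 * level 0..2.  The function names are hypothetical examples.
 *
 *     #include <stdint.h>
 *
 *     static uint32_t exampleTss16StackOff(uint8_t uCpl) { return uCpl * 4u + 2u; }   // sp0 at 2, ss0 at 4, ...
 *     static uint32_t exampleTss32StackOff(uint8_t uCpl) { return uCpl * 8u + 4u; }   // esp0 at 4, ss0 at 8, ...
 *
 *     // exampleTss32StackOff(1) == 12: ESP1 at offset 12, SS1 at offset 16.
 */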
3641
3642
3643/**
3644 * Loads the specified stack pointer from the 64-bit TSS.
3645 *
3646 * @returns VBox strict status code.
3647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3648 * @param pCtx The CPU context.
3649 * @param uCpl The CPL to load the stack for.
3650 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3651 * @param puRsp Where to return the new stack pointer.
3652 */
3653IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3654{
3655 Assert(uCpl < 4);
3656 Assert(uIst < 8);
3657 *puRsp = 0; /* make gcc happy */
3658
3659 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3660
3661 uint32_t off;
3662 if (uIst)
3663 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3664 else
3665 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3666 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3667 {
3668 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3669 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3670 }
3671
3672 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3673}
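
/*
 * Illustrative sketch (not part of the build): the offset calculation above
 * for the 64-bit TSS, where RSP0 starts at offset 4 and IST1 at offset 36
 * per the long mode TSS layout.  The function name is a hypothetical example.
 *
 *     #include <stdint.h>
 *
 *     static uint32_t exampleTss64StackOff(uint8_t uCpl, uint8_t uIst)
 *     {
 *         if (uIst)
 *             return (uint32_t)(uIst - 1) * 8u + 36u;   // IST1..IST7
 *         return (uint32_t)uCpl * 8u + 4u;              // RSP0..RSP2
 *     }
 */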
3674
3675
3676/**
3677 * Adjust the CPU state according to the exception being raised.
3678 *
3679 * @param pCtx The CPU context.
3680 * @param u8Vector The exception that has been raised.
3681 */
3682DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3683{
3684 switch (u8Vector)
3685 {
3686 case X86_XCPT_DB:
3687 pCtx->dr[7] &= ~X86_DR7_GD;
3688 break;
3689 /** @todo Read the AMD and Intel exception reference... */
3690 }
3691}
3692
3693
3694/**
3695 * Implements exceptions and interrupts for real mode.
3696 *
3697 * @returns VBox strict status code.
3698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3699 * @param pCtx The CPU context.
3700 * @param cbInstr The number of bytes to offset rIP by in the return
3701 * address.
3702 * @param u8Vector The interrupt / exception vector number.
3703 * @param fFlags The flags.
3704 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3705 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3706 */
3707IEM_STATIC VBOXSTRICTRC
3708iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3709 PCPUMCTX pCtx,
3710 uint8_t cbInstr,
3711 uint8_t u8Vector,
3712 uint32_t fFlags,
3713 uint16_t uErr,
3714 uint64_t uCr2)
3715{
3716 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3717 NOREF(uErr); NOREF(uCr2);
3718
3719 /*
3720 * Read the IDT entry.
3721 */
3722 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3723 {
3724 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3725 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3726 }
3727 RTFAR16 Idte;
3728 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3729 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3730 return rcStrict;
3731
3732 /*
3733 * Push the stack frame.
3734 */
3735 uint16_t *pu16Frame;
3736 uint64_t uNewRsp;
3737 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3738 if (rcStrict != VINF_SUCCESS)
3739 return rcStrict;
3740
3741 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3742#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3743 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3744 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3745 fEfl |= UINT16_C(0xf000);
3746#endif
3747 pu16Frame[2] = (uint16_t)fEfl;
3748 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3749 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3750 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3751 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3752 return rcStrict;
3753
3754 /*
3755 * Load the vector address into cs:ip and make exception specific state
3756 * adjustments.
3757 */
3758 pCtx->cs.Sel = Idte.sel;
3759 pCtx->cs.ValidSel = Idte.sel;
3760 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3761 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3762 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3763 pCtx->rip = Idte.off;
3764 fEfl &= ~X86_EFL_IF;
3765 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3766
3767 /** @todo do we actually do this in real mode? */
3768 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3769 iemRaiseXcptAdjustState(pCtx, u8Vector);
3770
3771 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3772}
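
/*
 * Illustrative sketch (not part of the build): the real mode dispatch above.
 * The IVT entry for vector n is the 4-byte far pointer at linear address
 * n * 4 (offset first, then segment), and the 6-byte frame pushed consists of
 * FLAGS, CS and the return IP.  The names are hypothetical and the example
 * assumes a flat byte view of guest memory.
 *
 *     #include <stdint.h>
 *
 *     typedef struct { uint16_t off, sel; } EXAMPLEFAR16;
 *
 *     static EXAMPLEFAR16 exampleReadIvtEntry(const uint8_t *pbGuestMem, uint8_t u8Vector)
 *     {
 *         const uint8_t *pb = &pbGuestMem[(uint32_t)u8Vector * 4];
 *         EXAMPLEFAR16 Entry;
 *         Entry.off = (uint16_t)(pb[0] | ((uint16_t)pb[1] << 8));   // new IP
 *         Entry.sel = (uint16_t)(pb[2] | ((uint16_t)pb[3] << 8));   // new CS
 *         return Entry;
 *     }
 */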
3773
3774
3775/**
3776 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3777 *
3778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3779 * @param pSReg Pointer to the segment register.
3780 */
3781IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3782{
3783 pSReg->Sel = 0;
3784 pSReg->ValidSel = 0;
3785 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3786 {
3787 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
3788 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3789 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3790 }
3791 else
3792 {
3793 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3794 /** @todo check this on AMD-V */
3795 pSReg->u64Base = 0;
3796 pSReg->u32Limit = 0;
3797 }
3798}
3799
3800
3801/**
3802 * Loads a segment selector during a task switch in V8086 mode.
3803 *
3804 * @param pSReg Pointer to the segment register.
3805 * @param uSel The selector value to load.
3806 */
3807IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3808{
3809 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3810 pSReg->Sel = uSel;
3811 pSReg->ValidSel = uSel;
3812 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3813 pSReg->u64Base = uSel << 4;
3814 pSReg->u32Limit = 0xffff;
3815 pSReg->Attr.u = 0xf3;
3816}
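
/*
 * Illustrative sketch (not part of the build): the v8086 hidden register rule
 * applied above - the base is the selector shifted left by four, the limit is
 * 64 KiB minus one.  The function name is a hypothetical example.
 *
 *     #include <stdint.h>
 *
 *     static uint32_t exampleV86Base(uint16_t uSel)
 *     {
 *         return (uint32_t)uSel << 4;   // e.g. selector 0xb800 -> base 0xb8000
 *     }
 */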
3817
3818
3819/**
3820 * Loads a NULL data selector into a selector register, both the hidden and
3821 * visible parts, in protected mode.
3822 *
3823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3824 * @param pSReg Pointer to the segment register.
3825 * @param uRpl The RPL.
3826 */
3827IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3828{
3829 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3830 * data selector in protected mode. */
3831 pSReg->Sel = uRpl;
3832 pSReg->ValidSel = uRpl;
3833 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3834 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3835 {
3836 /* VT-x (Intel 3960x) observed doing something like this. */
3837 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3838 pSReg->u32Limit = UINT32_MAX;
3839 pSReg->u64Base = 0;
3840 }
3841 else
3842 {
3843 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3844 pSReg->u32Limit = 0;
3845 pSReg->u64Base = 0;
3846 }
3847}
3848
3849
3850/**
3851 * Loads a segment selector during a task switch in protected mode.
3852 *
3853 * In this task switch scenario, we would throw \#TS exceptions rather than
3854 * \#GPs.
3855 *
3856 * @returns VBox strict status code.
3857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3858 * @param pSReg Pointer to the segment register.
3859 * @param uSel The new selector value.
3860 *
3861 * @remarks This does _not_ handle CS or SS.
3862 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3863 */
3864IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3865{
3866 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3867
3868 /* Null data selector. */
3869 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3870 {
3871 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3872 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3873 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3874 return VINF_SUCCESS;
3875 }
3876
3877 /* Fetch the descriptor. */
3878 IEMSELDESC Desc;
3879 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3880 if (rcStrict != VINF_SUCCESS)
3881 {
3882 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3883 VBOXSTRICTRC_VAL(rcStrict)));
3884 return rcStrict;
3885 }
3886
3887 /* Must be a data segment or readable code segment. */
3888 if ( !Desc.Legacy.Gen.u1DescType
3889 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3890 {
3891 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3892 Desc.Legacy.Gen.u4Type));
3893 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3894 }
3895
3896 /* Check privileges for data segments and non-conforming code segments. */
3897 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3898 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3899 {
3900 /* The RPL and the new CPL must be less than or equal to the DPL. */
3901 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3902 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3903 {
3904 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3905 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3906 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3907 }
3908 }
3909
3910 /* Is it there? */
3911 if (!Desc.Legacy.Gen.u1Present)
3912 {
3913 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3914 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3915 }
3916
3917 /* The base and limit. */
3918 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3919 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3920
3921 /*
3922 * Ok, everything checked out fine. Now set the accessed bit before
3923 * committing the result into the registers.
3924 */
3925 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3926 {
3927 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3928 if (rcStrict != VINF_SUCCESS)
3929 return rcStrict;
3930 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3931 }
3932
3933 /* Commit */
3934 pSReg->Sel = uSel;
3935 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3936 pSReg->u32Limit = cbLimit;
3937 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3938 pSReg->ValidSel = uSel;
3939 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3940 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3941 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3942
3943 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3944 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3945 return VINF_SUCCESS;
3946}
3947
3948
3949/**
3950 * Performs a task switch.
3951 *
3952 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3953 * caller is responsible for performing the necessary checks (like DPL, TSS
3954 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3955 * reference for JMP, CALL, IRET.
3956 *
3957 * If the task switch is due to a software interrupt or hardware exception,
3958 * the caller is responsible for validating the TSS selector and descriptor. See
3959 * Intel Instruction reference for INT n.
3960 *
3961 * @returns VBox strict status code.
3962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3963 * @param pCtx The CPU context.
3964 * @param enmTaskSwitch What caused this task switch.
3965 * @param uNextEip The EIP effective after the task switch.
3966 * @param fFlags The flags.
3967 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3968 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3969 * @param SelTSS The TSS selector of the new task.
3970 * @param pNewDescTSS Pointer to the new TSS descriptor.
3971 */
3972IEM_STATIC VBOXSTRICTRC
3973iemTaskSwitch(PVMCPU pVCpu,
3974 PCPUMCTX pCtx,
3975 IEMTASKSWITCH enmTaskSwitch,
3976 uint32_t uNextEip,
3977 uint32_t fFlags,
3978 uint16_t uErr,
3979 uint64_t uCr2,
3980 RTSEL SelTSS,
3981 PIEMSELDESC pNewDescTSS)
3982{
3983 Assert(!IEM_IS_REAL_MODE(pVCpu));
3984 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3985
3986 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3987 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3988 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3989 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3990 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3991
3992 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3993 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3994
3995 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3996 fIsNewTSS386, pCtx->eip, uNextEip));
3997
3998 /* Update CR2 in case it's a page-fault. */
3999 /** @todo This should probably be done much earlier in IEM/PGM. See
4000 * @bugref{5653#c49}. */
4001 if (fFlags & IEM_XCPT_FLAGS_CR2)
4002 pCtx->cr2 = uCr2;
4003
4004 /*
4005 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4006 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4007 */
4008 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4009 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4010 if (uNewTSSLimit < uNewTSSLimitMin)
4011 {
4012 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4013 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4014 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4015 }
4016
4017 /*
4018 * Check the current TSS limit. The last data written to the current TSS during the
4019 * task switch is 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4020 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4021 *
4022 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4023 * end up with smaller than "legal" TSS limits.
4024 */
4025 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
4026 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4027 if (uCurTSSLimit < uCurTSSLimitMin)
4028 {
4029 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4030 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4031 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4032 }
4033
4034 /*
4035 * Verify that the new TSS can be accessed and map it. Map only the required contents
4036 * and not the entire TSS.
4037 */
4038 void *pvNewTSS;
4039 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4040 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4041 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4042 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4043 * not perform correct translation if this happens. See Intel spec. 7.2.1
4044 * "Task-State Segment" */
4045 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4046 if (rcStrict != VINF_SUCCESS)
4047 {
4048 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4049 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4050 return rcStrict;
4051 }
4052
4053 /*
4054 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4055 */
4056 uint32_t u32EFlags = pCtx->eflags.u32;
4057 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4058 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4059 {
4060 PX86DESC pDescCurTSS;
4061 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4062 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4063 if (rcStrict != VINF_SUCCESS)
4064 {
4065 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4066 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4067 return rcStrict;
4068 }
4069
4070 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4071 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4072 if (rcStrict != VINF_SUCCESS)
4073 {
4074 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4075 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4076 return rcStrict;
4077 }
4078
4079 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4080 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4081 {
4082 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4083 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4084 u32EFlags &= ~X86_EFL_NT;
4085 }
4086 }
4087
4088 /*
4089 * Save the CPU state into the current TSS.
4090 */
4091 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4092 if (GCPtrNewTSS == GCPtrCurTSS)
4093 {
4094 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4095 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4096 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4097 }
4098 if (fIsNewTSS386)
4099 {
4100 /*
4101 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4102 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4103 */
4104 void *pvCurTSS32;
4105 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4106 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4107 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4108 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4109 if (rcStrict != VINF_SUCCESS)
4110 {
4111 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4112 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4113 return rcStrict;
4114 }
4115
4116 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4117 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4118 pCurTSS32->eip = uNextEip;
4119 pCurTSS32->eflags = u32EFlags;
4120 pCurTSS32->eax = pCtx->eax;
4121 pCurTSS32->ecx = pCtx->ecx;
4122 pCurTSS32->edx = pCtx->edx;
4123 pCurTSS32->ebx = pCtx->ebx;
4124 pCurTSS32->esp = pCtx->esp;
4125 pCurTSS32->ebp = pCtx->ebp;
4126 pCurTSS32->esi = pCtx->esi;
4127 pCurTSS32->edi = pCtx->edi;
4128 pCurTSS32->es = pCtx->es.Sel;
4129 pCurTSS32->cs = pCtx->cs.Sel;
4130 pCurTSS32->ss = pCtx->ss.Sel;
4131 pCurTSS32->ds = pCtx->ds.Sel;
4132 pCurTSS32->fs = pCtx->fs.Sel;
4133 pCurTSS32->gs = pCtx->gs.Sel;
4134
4135 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4136 if (rcStrict != VINF_SUCCESS)
4137 {
4138 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4139 VBOXSTRICTRC_VAL(rcStrict)));
4140 return rcStrict;
4141 }
4142 }
4143 else
4144 {
4145 /*
4146 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4147 */
4148 void *pvCurTSS16;
4149 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4150 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4151 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4152 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4153 if (rcStrict != VINF_SUCCESS)
4154 {
4155 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4156 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4157 return rcStrict;
4158 }
4159
4160 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4161 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4162 pCurTSS16->ip = uNextEip;
4163 pCurTSS16->flags = u32EFlags;
4164 pCurTSS16->ax = pCtx->ax;
4165 pCurTSS16->cx = pCtx->cx;
4166 pCurTSS16->dx = pCtx->dx;
4167 pCurTSS16->bx = pCtx->bx;
4168 pCurTSS16->sp = pCtx->sp;
4169 pCurTSS16->bp = pCtx->bp;
4170 pCurTSS16->si = pCtx->si;
4171 pCurTSS16->di = pCtx->di;
4172 pCurTSS16->es = pCtx->es.Sel;
4173 pCurTSS16->cs = pCtx->cs.Sel;
4174 pCurTSS16->ss = pCtx->ss.Sel;
4175 pCurTSS16->ds = pCtx->ds.Sel;
4176
4177 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4178 if (rcStrict != VINF_SUCCESS)
4179 {
4180 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4181 VBOXSTRICTRC_VAL(rcStrict)));
4182 return rcStrict;
4183 }
4184 }
4185
4186 /*
4187 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4188 */
4189 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4190 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4191 {
4192 /* 16-bit or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
4193 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4194 pNewTSS->selPrev = pCtx->tr.Sel;
4195 }
4196
4197 /*
4198 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4199 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4200 */
4201 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4202 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4203 bool fNewDebugTrap;
4204 if (fIsNewTSS386)
4205 {
4206 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4207 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4208 uNewEip = pNewTSS32->eip;
4209 uNewEflags = pNewTSS32->eflags;
4210 uNewEax = pNewTSS32->eax;
4211 uNewEcx = pNewTSS32->ecx;
4212 uNewEdx = pNewTSS32->edx;
4213 uNewEbx = pNewTSS32->ebx;
4214 uNewEsp = pNewTSS32->esp;
4215 uNewEbp = pNewTSS32->ebp;
4216 uNewEsi = pNewTSS32->esi;
4217 uNewEdi = pNewTSS32->edi;
4218 uNewES = pNewTSS32->es;
4219 uNewCS = pNewTSS32->cs;
4220 uNewSS = pNewTSS32->ss;
4221 uNewDS = pNewTSS32->ds;
4222 uNewFS = pNewTSS32->fs;
4223 uNewGS = pNewTSS32->gs;
4224 uNewLdt = pNewTSS32->selLdt;
4225 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4226 }
4227 else
4228 {
4229 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4230 uNewCr3 = 0;
4231 uNewEip = pNewTSS16->ip;
4232 uNewEflags = pNewTSS16->flags;
4233 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4234 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4235 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4236 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4237 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4238 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4239 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4240 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4241 uNewES = pNewTSS16->es;
4242 uNewCS = pNewTSS16->cs;
4243 uNewSS = pNewTSS16->ss;
4244 uNewDS = pNewTSS16->ds;
4245 uNewFS = 0;
4246 uNewGS = 0;
4247 uNewLdt = pNewTSS16->selLdt;
4248 fNewDebugTrap = false;
4249 }
4250
4251 if (GCPtrNewTSS == GCPtrCurTSS)
4252 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4253 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4254
4255 /*
4256 * We're done accessing the new TSS.
4257 */
4258 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4259 if (rcStrict != VINF_SUCCESS)
4260 {
4261 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4262 return rcStrict;
4263 }
4264
4265 /*
4266 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4267 */
4268 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4269 {
4270 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4271 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4272 if (rcStrict != VINF_SUCCESS)
4273 {
4274 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4275 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4276 return rcStrict;
4277 }
4278
4279 /* Check that the descriptor indicates the new TSS is available (not busy). */
4280 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4281 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4282 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4283
4284 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4285 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4286 if (rcStrict != VINF_SUCCESS)
4287 {
4288 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4289 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4290 return rcStrict;
4291 }
4292 }
4293
4294 /*
4295 * From this point on, we're technically in the new task. We will defer exceptions
4296 * until the completion of the task switch but before executing any instructions in the new task.
4297 */
4298 pCtx->tr.Sel = SelTSS;
4299 pCtx->tr.ValidSel = SelTSS;
4300 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4301 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4302 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4303 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4304 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4305
4306 /* Set the busy bit in TR. */
4307 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4308 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4309 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4310 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4311 {
4312 uNewEflags |= X86_EFL_NT;
4313 }
4314
4315 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4316 pCtx->cr0 |= X86_CR0_TS;
4317 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4318
4319 pCtx->eip = uNewEip;
4320 pCtx->eax = uNewEax;
4321 pCtx->ecx = uNewEcx;
4322 pCtx->edx = uNewEdx;
4323 pCtx->ebx = uNewEbx;
4324 pCtx->esp = uNewEsp;
4325 pCtx->ebp = uNewEbp;
4326 pCtx->esi = uNewEsi;
4327 pCtx->edi = uNewEdi;
4328
4329 uNewEflags &= X86_EFL_LIVE_MASK;
4330 uNewEflags |= X86_EFL_RA1_MASK;
4331 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4332
4333 /*
4334 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4335 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4336 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4337 */
4338 pCtx->es.Sel = uNewES;
4339 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4340
4341 pCtx->cs.Sel = uNewCS;
4342 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4343
4344 pCtx->ss.Sel = uNewSS;
4345 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4346
4347 pCtx->ds.Sel = uNewDS;
4348 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4349
4350 pCtx->fs.Sel = uNewFS;
4351 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4352
4353 pCtx->gs.Sel = uNewGS;
4354 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4355 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4356
4357 pCtx->ldtr.Sel = uNewLdt;
4358 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4359 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4360 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4361
4362 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4363 {
4364 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4365 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4366 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4367 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4368 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4369 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4370 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4371 }
4372
4373 /*
4374 * Switch CR3 for the new task.
4375 */
4376 if ( fIsNewTSS386
4377 && (pCtx->cr0 & X86_CR0_PG))
4378 {
4379 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4380 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4381 {
4382 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4383 AssertRCSuccessReturn(rc, rc);
4384 }
4385 else
4386 pCtx->cr3 = uNewCr3;
4387
4388 /* Inform PGM. */
4389 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4390 {
4391 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4392 AssertRCReturn(rc, rc);
4393 /* ignore informational status codes */
4394 }
4395 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4396 }
4397
4398 /*
4399 * Switch LDTR for the new task.
4400 */
4401 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4402 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4403 else
4404 {
4405 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4406
4407 IEMSELDESC DescNewLdt;
4408 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4409 if (rcStrict != VINF_SUCCESS)
4410 {
4411 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4412 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4413 return rcStrict;
4414 }
4415 if ( !DescNewLdt.Legacy.Gen.u1Present
4416 || DescNewLdt.Legacy.Gen.u1DescType
4417 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4418 {
4419 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4420 uNewLdt, DescNewLdt.Legacy.u));
4421 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4422 }
4423
4424 pCtx->ldtr.ValidSel = uNewLdt;
4425 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4426 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4427 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4428 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4429 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4430 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4431 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4432 }
4433
4434 IEMSELDESC DescSS;
4435 if (IEM_IS_V86_MODE(pVCpu))
4436 {
4437 pVCpu->iem.s.uCpl = 3;
4438 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4439 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4440 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4441 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4442 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4443 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4444
4445 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4446 DescSS.Legacy.u = 0;
4447 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4448 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4449 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4450 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4451 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4452 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4453 DescSS.Legacy.Gen.u2Dpl = 3;
4454 }
4455 else
4456 {
4457 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4458
4459 /*
4460 * Load the stack segment for the new task.
4461 */
4462 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4463 {
4464 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4465 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4466 }
4467
4468 /* Fetch the descriptor. */
4469 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4470 if (rcStrict != VINF_SUCCESS)
4471 {
4472 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4473 VBOXSTRICTRC_VAL(rcStrict)));
4474 return rcStrict;
4475 }
4476
4477 /* SS must be a data segment and writable. */
4478 if ( !DescSS.Legacy.Gen.u1DescType
4479 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4480 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4481 {
4482 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4483 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4484 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4485 }
4486
4487 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4488 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4489 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4490 {
4491 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4492 uNewCpl));
4493 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4494 }
4495
4496 /* Is it there? */
4497 if (!DescSS.Legacy.Gen.u1Present)
4498 {
4499 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4500 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4501 }
4502
4503 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4504 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4505
4506 /* Set the accessed bit before committing the result into SS. */
4507 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4508 {
4509 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4510 if (rcStrict != VINF_SUCCESS)
4511 return rcStrict;
4512 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4513 }
4514
4515 /* Commit SS. */
4516 pCtx->ss.Sel = uNewSS;
4517 pCtx->ss.ValidSel = uNewSS;
4518 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4519 pCtx->ss.u32Limit = cbLimit;
4520 pCtx->ss.u64Base = u64Base;
4521 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4522 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4523
4524 /* CPL has changed, update IEM before loading rest of segments. */
4525 pVCpu->iem.s.uCpl = uNewCpl;
4526
4527 /*
4528 * Load the data segments for the new task.
4529 */
4530 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4531 if (rcStrict != VINF_SUCCESS)
4532 return rcStrict;
4533 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4534 if (rcStrict != VINF_SUCCESS)
4535 return rcStrict;
4536 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4537 if (rcStrict != VINF_SUCCESS)
4538 return rcStrict;
4539 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4540 if (rcStrict != VINF_SUCCESS)
4541 return rcStrict;
4542
4543 /*
4544 * Load the code segment for the new task.
4545 */
4546 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4547 {
4548 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4549 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4550 }
4551
4552 /* Fetch the descriptor. */
4553 IEMSELDESC DescCS;
4554 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4555 if (rcStrict != VINF_SUCCESS)
4556 {
4557 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4558 return rcStrict;
4559 }
4560
4561 /* CS must be a code segment. */
4562 if ( !DescCS.Legacy.Gen.u1DescType
4563 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4564 {
4565 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4566 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4567 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4568 }
4569
4570 /* For conforming CS, DPL must be less than or equal to the RPL. */
4571 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4572 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4573 {
4574 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4575 DescCS.Legacy.Gen.u2Dpl));
4576 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4577 }
4578
4579 /* For non-conforming CS, DPL must match RPL. */
4580 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4581 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4582 {
4583 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4584 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4585 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4586 }
4587
4588 /* Is it there? */
4589 if (!DescCS.Legacy.Gen.u1Present)
4590 {
4591 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4592 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4593 }
4594
4595 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4596 u64Base = X86DESC_BASE(&DescCS.Legacy);
4597
4598 /* Set the accessed bit before committing the result into CS. */
4599 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4600 {
4601 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4602 if (rcStrict != VINF_SUCCESS)
4603 return rcStrict;
4604 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4605 }
4606
4607 /* Commit CS. */
4608 pCtx->cs.Sel = uNewCS;
4609 pCtx->cs.ValidSel = uNewCS;
4610 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4611 pCtx->cs.u32Limit = cbLimit;
4612 pCtx->cs.u64Base = u64Base;
4613 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4614 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4615 }
4616
4617 /** @todo Debug trap. */
4618 if (fIsNewTSS386 && fNewDebugTrap)
4619 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4620
4621 /*
4622 * Construct the error code masks based on what caused this task switch.
4623 * See Intel Instruction reference for INT.
4624 */
4625 uint16_t uExt;
4626 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4627 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4628 {
4629 uExt = 1;
4630 }
4631 else
4632 uExt = 0;
4633
4634 /*
4635 * Push any error code on to the new stack.
4636 */
4637 if (fFlags & IEM_XCPT_FLAGS_ERR)
4638 {
4639 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4640 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4641 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4642
4643 /* Check that there is sufficient space on the stack. */
4644 /** @todo Factor out segment limit checking for normal/expand down segments
4645 * into a separate function. */
4646 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4647 {
4648 if ( pCtx->esp - 1 > cbLimitSS
4649 || pCtx->esp < cbStackFrame)
4650 {
4651 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4652 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4653 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4654 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4655 }
4656 }
4657 else
4658 {
4659 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4660 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4661 {
4662 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4663 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4664 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4665 }
4666 }
4667
4668
4669 if (fIsNewTSS386)
4670 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4671 else
4672 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4673 if (rcStrict != VINF_SUCCESS)
4674 {
4675 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4676 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4677 return rcStrict;
4678 }
4679 }
4680
4681 /* Check the new EIP against the new CS limit. */
4682 if (pCtx->eip > pCtx->cs.u32Limit)
4683 {
4684        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4685 pCtx->eip, pCtx->cs.u32Limit));
4686 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4687 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4688 }
4689
4690 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4691 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4692}
4693
4694
4695/**
4696 * Implements exceptions and interrupts for protected mode.
4697 *
4698 * @returns VBox strict status code.
4699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4700 * @param pCtx The CPU context.
4701 * @param cbInstr The number of bytes to offset rIP by in the return
4702 * address.
4703 * @param u8Vector The interrupt / exception vector number.
4704 * @param fFlags The flags.
4705 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4706 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4707 */
4708IEM_STATIC VBOXSTRICTRC
4709iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4710 PCPUMCTX pCtx,
4711 uint8_t cbInstr,
4712 uint8_t u8Vector,
4713 uint32_t fFlags,
4714 uint16_t uErr,
4715 uint64_t uCr2)
4716{
4717 /*
4718 * Read the IDT entry.
4719 */
4720 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4721 {
4722 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4723 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4724 }
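    /* Illustrative note (not in the original source): protected-mode IDT
       entries are 8 bytes, so vector N occupies offsets 8*N..8*N+7.  For
       example, #PF (vector 0x0E) spans 0x70..0x77, so the IDT limit must be
       at least 0x77 for the bounds check above to pass. */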
4725 X86DESC Idte;
4726 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4727 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4728 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4729 return rcStrict;
4730 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4731 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4732 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4733
4734 /*
4735 * Check the descriptor type, DPL and such.
4736 * ASSUMES this is done in the same order as described for call-gate calls.
4737 */
4738 if (Idte.Gate.u1DescType)
4739 {
4740 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4741 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4742 }
4743 bool fTaskGate = false;
4744 uint8_t f32BitGate = true;
4745 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4746 switch (Idte.Gate.u4Type)
4747 {
4748 case X86_SEL_TYPE_SYS_UNDEFINED:
4749 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4750 case X86_SEL_TYPE_SYS_LDT:
4751 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4752 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4753 case X86_SEL_TYPE_SYS_UNDEFINED2:
4754 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4755 case X86_SEL_TYPE_SYS_UNDEFINED3:
4756 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4757 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4758 case X86_SEL_TYPE_SYS_UNDEFINED4:
4759 {
4760 /** @todo check what actually happens when the type is wrong...
4761 * esp. call gates. */
4762 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4763 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4764 }
4765
4766 case X86_SEL_TYPE_SYS_286_INT_GATE:
4767 f32BitGate = false;
4768 /* fall thru */
4769 case X86_SEL_TYPE_SYS_386_INT_GATE:
4770 fEflToClear |= X86_EFL_IF;
4771 break;
4772
4773 case X86_SEL_TYPE_SYS_TASK_GATE:
4774 fTaskGate = true;
4775#ifndef IEM_IMPLEMENTS_TASKSWITCH
4776 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4777#endif
4778 break;
4779
4780 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4781            f32BitGate = false;  /* fall thru */
4782 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4783 break;
4784
4785 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4786 }
4787
4788 /* Check DPL against CPL if applicable. */
4789 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4790 {
4791 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4792 {
4793 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4794 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4795 }
4796 }
4797
4798 /* Is it there? */
4799 if (!Idte.Gate.u1Present)
4800 {
4801 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4802 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4803 }
4804
4805 /* Is it a task-gate? */
4806 if (fTaskGate)
4807 {
4808 /*
4809 * Construct the error code masks based on what caused this task switch.
4810 * See Intel Instruction reference for INT.
4811 */
4812 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4813 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4814 RTSEL SelTSS = Idte.Gate.u16Sel;
4815
4816 /*
4817 * Fetch the TSS descriptor in the GDT.
4818 */
4819 IEMSELDESC DescTSS;
4820 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4821 if (rcStrict != VINF_SUCCESS)
4822 {
4823 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4824 VBOXSTRICTRC_VAL(rcStrict)));
4825 return rcStrict;
4826 }
4827
4828 /* The TSS descriptor must be a system segment and be available (not busy). */
4829 if ( DescTSS.Legacy.Gen.u1DescType
4830 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4831 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4832 {
4833 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4834 u8Vector, SelTSS, DescTSS.Legacy.au64));
4835 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4836 }
4837
4838 /* The TSS must be present. */
4839 if (!DescTSS.Legacy.Gen.u1Present)
4840 {
4841 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4842 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4843 }
4844
4845 /* Do the actual task switch. */
4846 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4847 }
4848
4849 /* A null CS is bad. */
4850 RTSEL NewCS = Idte.Gate.u16Sel;
4851 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4852 {
4853 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4854 return iemRaiseGeneralProtectionFault0(pVCpu);
4855 }
4856
4857 /* Fetch the descriptor for the new CS. */
4858 IEMSELDESC DescCS;
4859 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4860 if (rcStrict != VINF_SUCCESS)
4861 {
4862 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4863 return rcStrict;
4864 }
4865
4866 /* Must be a code segment. */
4867 if (!DescCS.Legacy.Gen.u1DescType)
4868 {
4869 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4870 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4871 }
4872 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4873 {
4874 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4875 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4876 }
4877
4878 /* Don't allow lowering the privilege level. */
4879 /** @todo Does the lowering of privileges apply to software interrupts
4880 * only? This has bearings on the more-privileged or
4881 * same-privilege stack behavior further down. A testcase would
4882 * be nice. */
4883 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4884 {
4885 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4886 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4887 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4888 }
4889
4890 /* Make sure the selector is present. */
4891 if (!DescCS.Legacy.Gen.u1Present)
4892 {
4893 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4894 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4895 }
4896
4897 /* Check the new EIP against the new CS limit. */
4898 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4899 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4900 ? Idte.Gate.u16OffsetLow
4901 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4902 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4903 if (uNewEip > cbLimitCS)
4904 {
4905 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4906 u8Vector, uNewEip, cbLimitCS, NewCS));
4907 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4908 }
4909 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4910
4911 /* Calc the flag image to push. */
4912 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4913 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4914 fEfl &= ~X86_EFL_RF;
4915 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4916 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4917
4918 /* From V8086 mode only go to CPL 0. */
4919 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4920 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4921 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4922 {
4923 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4924 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4925 }
4926
4927 /*
4928 * If the privilege level changes, we need to get a new stack from the TSS.
4929 * This in turns means validating the new SS and ESP...
4930 */
4931 if (uNewCpl != pVCpu->iem.s.uCpl)
4932 {
4933 RTSEL NewSS;
4934 uint32_t uNewEsp;
4935 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4936 if (rcStrict != VINF_SUCCESS)
4937 return rcStrict;
4938
4939 IEMSELDESC DescSS;
4940 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4941 if (rcStrict != VINF_SUCCESS)
4942 return rcStrict;
4943 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4944 if (!DescSS.Legacy.Gen.u1DefBig)
4945 {
4946 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4947 uNewEsp = (uint16_t)uNewEsp;
4948 }
4949
4950 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4951
4952 /* Check that there is sufficient space for the stack frame. */
4953 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4954 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4955 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4956 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
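        /* Worked example (illustrative): a 32-bit gate (f32BitGate=1) not
           coming from V86 mode pushes EIP, CS, EFLAGS, ESP and SS, i.e.
           5 dwords: (10 << 1) = 20 bytes, or 24 with an error code.  From
           V86 mode the ES, DS, FS and GS selectors are pushed as well,
           giving (18 << 1) = 36 or 40 bytes.  A 16-bit gate halves these. */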
4957
4958 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4959 {
4960 if ( uNewEsp - 1 > cbLimitSS
4961 || uNewEsp < cbStackFrame)
4962 {
4963 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4964 u8Vector, NewSS, uNewEsp, cbStackFrame));
4965 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4966 }
4967 }
4968 else
4969 {
4970 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4971 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4972 {
4973 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4974 u8Vector, NewSS, uNewEsp, cbStackFrame));
4975 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4976 }
4977 }
4978
4979 /*
4980 * Start making changes.
4981 */
4982
4983 /* Set the new CPL so that stack accesses use it. */
4984 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4985 pVCpu->iem.s.uCpl = uNewCpl;
4986
4987 /* Create the stack frame. */
4988 RTPTRUNION uStackFrame;
4989 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4990 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4991 if (rcStrict != VINF_SUCCESS)
4992 return rcStrict;
4993 void * const pvStackFrame = uStackFrame.pv;
4994 if (f32BitGate)
4995 {
4996 if (fFlags & IEM_XCPT_FLAGS_ERR)
4997 *uStackFrame.pu32++ = uErr;
4998 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4999 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5000 uStackFrame.pu32[2] = fEfl;
5001 uStackFrame.pu32[3] = pCtx->esp;
5002 uStackFrame.pu32[4] = pCtx->ss.Sel;
5003 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
5004 if (fEfl & X86_EFL_VM)
5005 {
5006 uStackFrame.pu32[1] = pCtx->cs.Sel;
5007 uStackFrame.pu32[5] = pCtx->es.Sel;
5008 uStackFrame.pu32[6] = pCtx->ds.Sel;
5009 uStackFrame.pu32[7] = pCtx->fs.Sel;
5010 uStackFrame.pu32[8] = pCtx->gs.Sel;
5011 }
5012 }
5013 else
5014 {
5015 if (fFlags & IEM_XCPT_FLAGS_ERR)
5016 *uStackFrame.pu16++ = uErr;
5017 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
5018 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5019 uStackFrame.pu16[2] = fEfl;
5020 uStackFrame.pu16[3] = pCtx->sp;
5021 uStackFrame.pu16[4] = pCtx->ss.Sel;
5022 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
5023 if (fEfl & X86_EFL_VM)
5024 {
5025 uStackFrame.pu16[1] = pCtx->cs.Sel;
5026 uStackFrame.pu16[5] = pCtx->es.Sel;
5027 uStackFrame.pu16[6] = pCtx->ds.Sel;
5028 uStackFrame.pu16[7] = pCtx->fs.Sel;
5029 uStackFrame.pu16[8] = pCtx->gs.Sel;
5030 }
5031 }
5032 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5033 if (rcStrict != VINF_SUCCESS)
5034 return rcStrict;
5035
5036 /* Mark the selectors 'accessed' (hope this is the correct time). */
5037    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5038 * after pushing the stack frame? (Write protect the gdt + stack to
5039 * find out.) */
5040 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5041 {
5042 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5043 if (rcStrict != VINF_SUCCESS)
5044 return rcStrict;
5045 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5046 }
5047
5048 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5049 {
5050 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5051 if (rcStrict != VINF_SUCCESS)
5052 return rcStrict;
5053 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5054 }
5055
5056 /*
5057         * Start committing the register changes (joins with the DPL=CPL branch).
5058 */
5059 pCtx->ss.Sel = NewSS;
5060 pCtx->ss.ValidSel = NewSS;
5061 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5062 pCtx->ss.u32Limit = cbLimitSS;
5063 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5064 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5065 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5066 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5067 * SP is loaded).
5068 * Need to check the other combinations too:
5069 * - 16-bit TSS, 32-bit handler
5070 * - 32-bit TSS, 16-bit handler */
5071 if (!pCtx->ss.Attr.n.u1DefBig)
5072 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
5073 else
5074 pCtx->rsp = uNewEsp - cbStackFrame;
5075
5076 if (fEfl & X86_EFL_VM)
5077 {
5078 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
5079 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5080 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5081 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5082 }
5083 }
5084 /*
5085 * Same privilege, no stack change and smaller stack frame.
5086 */
5087 else
5088 {
5089 uint64_t uNewRsp;
5090 RTPTRUNION uStackFrame;
5091 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
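        /* Illustrative arithmetic: with no privilege change only EIP, CS and
           EFLAGS (plus an optional error code) are pushed, so a 16-bit gate
           needs 6 or 8 bytes and a 32-bit gate 12 or 16 bytes. */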
5092 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5093 if (rcStrict != VINF_SUCCESS)
5094 return rcStrict;
5095 void * const pvStackFrame = uStackFrame.pv;
5096
5097 if (f32BitGate)
5098 {
5099 if (fFlags & IEM_XCPT_FLAGS_ERR)
5100 *uStackFrame.pu32++ = uErr;
5101 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5102 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5103 uStackFrame.pu32[2] = fEfl;
5104 }
5105 else
5106 {
5107 if (fFlags & IEM_XCPT_FLAGS_ERR)
5108 *uStackFrame.pu16++ = uErr;
5109 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5110 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5111 uStackFrame.pu16[2] = fEfl;
5112 }
5113 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5114 if (rcStrict != VINF_SUCCESS)
5115 return rcStrict;
5116
5117 /* Mark the CS selector as 'accessed'. */
5118 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5119 {
5120 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5121 if (rcStrict != VINF_SUCCESS)
5122 return rcStrict;
5123 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5124 }
5125
5126 /*
5127 * Start committing the register changes (joins with the other branch).
5128 */
5129 pCtx->rsp = uNewRsp;
5130 }
5131
5132 /* ... register committing continues. */
5133 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5134 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5135 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5136 pCtx->cs.u32Limit = cbLimitCS;
5137 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5138 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5139
5140 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5141 fEfl &= ~fEflToClear;
5142 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5143
5144 if (fFlags & IEM_XCPT_FLAGS_CR2)
5145 pCtx->cr2 = uCr2;
5146
5147 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5148 iemRaiseXcptAdjustState(pCtx, u8Vector);
5149
5150 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5151}
5152
5153
5154/**
5155 * Implements exceptions and interrupts for long mode.
5156 *
5157 * @returns VBox strict status code.
5158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5159 * @param pCtx The CPU context.
5160 * @param cbInstr The number of bytes to offset rIP by in the return
5161 * address.
5162 * @param u8Vector The interrupt / exception vector number.
5163 * @param fFlags The flags.
5164 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5165 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5166 */
5167IEM_STATIC VBOXSTRICTRC
5168iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5169 PCPUMCTX pCtx,
5170 uint8_t cbInstr,
5171 uint8_t u8Vector,
5172 uint32_t fFlags,
5173 uint16_t uErr,
5174 uint64_t uCr2)
5175{
5176 /*
5177 * Read the IDT entry.
5178 */
5179 uint16_t offIdt = (uint16_t)u8Vector << 4;
5180 if (pCtx->idtr.cbIdt < offIdt + 7)
5181 {
5182 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5183 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5184 }
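    /* Illustrative note: long-mode IDT entries are 16 bytes, hence the
       "u8Vector << 4" offset above and the two 8-byte system fetches below.
       For example, #PF (vector 0x0E) starts at offset 0xE0 in the IDT. */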
5185 X86DESC64 Idte;
5186 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5187 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5188 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5189 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5190 return rcStrict;
5191 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5192 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5193 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5194
5195 /*
5196 * Check the descriptor type, DPL and such.
5197 * ASSUMES this is done in the same order as described for call-gate calls.
5198 */
5199 if (Idte.Gate.u1DescType)
5200 {
5201 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5202 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5203 }
5204 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5205 switch (Idte.Gate.u4Type)
5206 {
5207 case AMD64_SEL_TYPE_SYS_INT_GATE:
5208 fEflToClear |= X86_EFL_IF;
5209 break;
5210 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5211 break;
5212
5213 default:
5214 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5215 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5216 }
5217
5218 /* Check DPL against CPL if applicable. */
5219 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5220 {
5221 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5222 {
5223 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5224 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5225 }
5226 }
5227
5228 /* Is it there? */
5229 if (!Idte.Gate.u1Present)
5230 {
5231 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5232 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5233 }
5234
5235 /* A null CS is bad. */
5236 RTSEL NewCS = Idte.Gate.u16Sel;
5237 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5238 {
5239 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5240 return iemRaiseGeneralProtectionFault0(pVCpu);
5241 }
5242
5243 /* Fetch the descriptor for the new CS. */
5244 IEMSELDESC DescCS;
5245 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5246 if (rcStrict != VINF_SUCCESS)
5247 {
5248 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5249 return rcStrict;
5250 }
5251
5252 /* Must be a 64-bit code segment. */
5253 if (!DescCS.Long.Gen.u1DescType)
5254 {
5255 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5256 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5257 }
5258 if ( !DescCS.Long.Gen.u1Long
5259 || DescCS.Long.Gen.u1DefBig
5260 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5261 {
5262 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5263 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5264 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5265 }
5266
5267 /* Don't allow lowering the privilege level. For non-conforming CS
5268 selectors, the CS.DPL sets the privilege level the trap/interrupt
5269 handler runs at. For conforming CS selectors, the CPL remains
5270 unchanged, but the CS.DPL must be <= CPL. */
5271 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5272 * when CPU in Ring-0. Result \#GP? */
5273 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5274 {
5275 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5276 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5277 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5278 }
5279
5280
5281 /* Make sure the selector is present. */
5282 if (!DescCS.Legacy.Gen.u1Present)
5283 {
5284 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5285 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5286 }
5287
5288 /* Check that the new RIP is canonical. */
5289 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5290 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5291 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5292 if (!IEM_IS_CANONICAL(uNewRip))
5293 {
5294 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5295 return iemRaiseGeneralProtectionFault0(pVCpu);
5296 }
5297
5298 /*
5299 * If the privilege level changes or if the IST isn't zero, we need to get
5300 * a new stack from the TSS.
5301 */
5302 uint64_t uNewRsp;
5303 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5304 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5305 if ( uNewCpl != pVCpu->iem.s.uCpl
5306 || Idte.Gate.u3IST != 0)
5307 {
5308 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5309 if (rcStrict != VINF_SUCCESS)
5310 return rcStrict;
5311 }
5312 else
5313 uNewRsp = pCtx->rsp;
5314 uNewRsp &= ~(uint64_t)0xf;
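    /* Note: in 64-bit mode the CPU aligns the new stack pointer down to a
       16-byte boundary before pushing the interrupt frame, which is what the
       masking above emulates. */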
5315
5316 /*
5317 * Calc the flag image to push.
5318 */
5319 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5320 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5321 fEfl &= ~X86_EFL_RF;
5322 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5323 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5324
5325 /*
5326 * Start making changes.
5327 */
5328 /* Set the new CPL so that stack accesses use it. */
5329 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5330 pVCpu->iem.s.uCpl = uNewCpl;
5331
5332 /* Create the stack frame. */
5333 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
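    /* Illustrative layout: the 64-bit frame always holds RIP, CS, RFLAGS,
       RSP and SS (5 qwords = 40 bytes); an error code adds a sixth qword at
       the lowest address for a total of 48 bytes, filled in below. */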
5334 RTPTRUNION uStackFrame;
5335 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5336 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5337 if (rcStrict != VINF_SUCCESS)
5338 return rcStrict;
5339 void * const pvStackFrame = uStackFrame.pv;
5340
5341 if (fFlags & IEM_XCPT_FLAGS_ERR)
5342 *uStackFrame.pu64++ = uErr;
5343 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5344 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5345 uStackFrame.pu64[2] = fEfl;
5346 uStackFrame.pu64[3] = pCtx->rsp;
5347 uStackFrame.pu64[4] = pCtx->ss.Sel;
5348 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5349 if (rcStrict != VINF_SUCCESS)
5350 return rcStrict;
5351
5352    /* Mark the CS selector 'accessed' (hope this is the correct time). */
5353    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5354 * after pushing the stack frame? (Write protect the gdt + stack to
5355 * find out.) */
5356 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5357 {
5358 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5359 if (rcStrict != VINF_SUCCESS)
5360 return rcStrict;
5361 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5362 }
5363
5364 /*
5365     * Start committing the register changes.
5366 */
5367 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5368 * hidden registers when interrupting 32-bit or 16-bit code! */
5369 if (uNewCpl != uOldCpl)
5370 {
5371 pCtx->ss.Sel = 0 | uNewCpl;
5372 pCtx->ss.ValidSel = 0 | uNewCpl;
5373 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5374 pCtx->ss.u32Limit = UINT32_MAX;
5375 pCtx->ss.u64Base = 0;
5376 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5377 }
5378 pCtx->rsp = uNewRsp - cbStackFrame;
5379 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5380 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5381 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5382 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5383 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5384 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5385 pCtx->rip = uNewRip;
5386
5387 fEfl &= ~fEflToClear;
5388 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5389
5390 if (fFlags & IEM_XCPT_FLAGS_CR2)
5391 pCtx->cr2 = uCr2;
5392
5393 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5394 iemRaiseXcptAdjustState(pCtx, u8Vector);
5395
5396 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5397}
5398
5399
5400/**
5401 * Implements exceptions and interrupts.
5402 *
5403 * All exceptions and interrupts go thru this function!
5404 *
5405 * @returns VBox strict status code.
5406 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5407 * @param cbInstr The number of bytes to offset rIP by in the return
5408 * address.
5409 * @param u8Vector The interrupt / exception vector number.
5410 * @param fFlags The flags.
5411 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5412 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5413 */
5414DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5415iemRaiseXcptOrInt(PVMCPU pVCpu,
5416 uint8_t cbInstr,
5417 uint8_t u8Vector,
5418 uint32_t fFlags,
5419 uint16_t uErr,
5420 uint64_t uCr2)
5421{
5422 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5423#ifdef IN_RING0
5424 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5425 AssertRCReturn(rc, rc);
5426#endif
5427
5428#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5429 /*
5430 * Flush prefetch buffer
5431 */
5432 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5433#endif
5434
5435 /*
5436 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5437 */
5438 if ( pCtx->eflags.Bits.u1VM
5439 && pCtx->eflags.Bits.u2IOPL != 3
5440 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5441 && (pCtx->cr0 & X86_CR0_PE) )
5442 {
5443 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5444 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5445 u8Vector = X86_XCPT_GP;
5446 uErr = 0;
5447 }
5448#ifdef DBGFTRACE_ENABLED
5449 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5450 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5451 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5452#endif
5453
5454#ifdef VBOX_WITH_NESTED_HWVIRT
5455 if (IEM_IS_SVM_ENABLED(pVCpu))
5456 {
5457 /*
5458 * If the event is being injected as part of VMRUN, it isn't subject to event
5459 * intercepts in the nested-guest. However, secondary exceptions that occur
5460 * during injection of any event -are- subject to exception intercepts.
5461 * See AMD spec. 15.20 "Event Injection".
5462 */
5463 if (!pCtx->hwvirt.svm.fInterceptEvents)
5464 pCtx->hwvirt.svm.fInterceptEvents = 1;
5465 else
5466 {
5467 /*
5468 * Check and handle if the event being raised is intercepted.
5469 */
5470 VBOXSTRICTRC rcStrict0 = iemHandleSvmNstGstEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5471 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5472 return rcStrict0;
5473 }
5474 }
5475#endif /* VBOX_WITH_NESTED_HWVIRT */
5476
5477 /*
5478 * Do recursion accounting.
5479 */
5480 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5481 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5482 if (pVCpu->iem.s.cXcptRecursions == 0)
5483 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5484 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5485 else
5486 {
5487 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5488 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5489 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5490
5491 if (pVCpu->iem.s.cXcptRecursions >= 3)
5492 {
5493#ifdef DEBUG_bird
5494 AssertFailed();
5495#endif
5496 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5497 }
5498
5499 /*
5500 * Evaluate the sequence of recurring events.
5501 */
5502 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5503 NULL /* pXcptRaiseInfo */);
5504 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5505 { /* likely */ }
5506 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5507 {
5508 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5509 u8Vector = X86_XCPT_DF;
5510 uErr = 0;
5511 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5512 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5513 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + X86_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5514 }
5515 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5516 {
5517 Log2(("iemRaiseXcptOrInt: raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5518 return iemInitiateCpuShutdown(pVCpu);
5519 }
5520 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5521 {
5522 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5523 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5524 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5525 return VERR_EM_GUEST_CPU_HANG;
5526 }
5527 else
5528 {
5529 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5530 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5531 return VERR_IEM_IPE_9;
5532 }
5533
5534 /*
5535         * The 'EXT' bit is set when an exception occurs during delivery of an external
5536 * event (such as an interrupt or earlier exception), see Intel spec. 6.13
5537 * "Error Code".
5538 *
5539         * For exceptions generated by software interrupts and the INTO and INT3 instructions,
5540 * the 'EXT' bit will not be set, see Intel Instruction reference for INT n.
5541 */
5542 /** @todo Would INT1/ICEBP raised \#DB set the 'EXT' bit or not? Testcase... */
5543 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT))
5544 && (fFlags & IEM_XCPT_FLAGS_ERR)
5545 && u8Vector != X86_XCPT_PF
5546 && u8Vector != X86_XCPT_DF)
5547 {
5548 uErr |= X86_TRAP_ERR_EXTERNAL;
5549 }
5550 }
5551
5552 pVCpu->iem.s.cXcptRecursions++;
5553 pVCpu->iem.s.uCurXcpt = u8Vector;
5554 pVCpu->iem.s.fCurXcpt = fFlags;
5555 pVCpu->iem.s.uCurXcptErr = uErr;
5556 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5557
5558 /*
5559 * Extensive logging.
5560 */
5561#if defined(LOG_ENABLED) && defined(IN_RING3)
5562 if (LogIs3Enabled())
5563 {
5564 PVM pVM = pVCpu->CTX_SUFF(pVM);
5565 char szRegs[4096];
5566 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5567 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5568 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5569 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5570 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5571 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5572 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5573 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5574 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5575 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5576 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5577 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5578 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5579 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5580 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5581 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5582 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5583 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5584 " efer=%016VR{efer}\n"
5585 " pat=%016VR{pat}\n"
5586 " sf_mask=%016VR{sf_mask}\n"
5587 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5588 " lstar=%016VR{lstar}\n"
5589 " star=%016VR{star} cstar=%016VR{cstar}\n"
5590 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5591 );
5592
5593 char szInstr[256];
5594 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5595 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5596 szInstr, sizeof(szInstr), NULL);
5597 Log3(("%s%s\n", szRegs, szInstr));
5598 }
5599#endif /* LOG_ENABLED */
5600
5601 /*
5602 * Call the mode specific worker function.
5603 */
5604 VBOXSTRICTRC rcStrict;
5605 if (!(pCtx->cr0 & X86_CR0_PE))
5606 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5607 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5608 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5609 else
5610 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5611
5612 /* Flush the prefetch buffer. */
5613#ifdef IEM_WITH_CODE_TLB
5614 pVCpu->iem.s.pbInstrBuf = NULL;
5615#else
5616 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5617#endif
5618
5619 /*
5620 * Unwind.
5621 */
5622 pVCpu->iem.s.cXcptRecursions--;
5623 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5624 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5625 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5626 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5627 return rcStrict;
5628}
5629
5630#ifdef IEM_WITH_SETJMP
5631/**
5632 * See iemRaiseXcptOrInt. Will not return.
5633 */
5634IEM_STATIC DECL_NO_RETURN(void)
5635iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5636 uint8_t cbInstr,
5637 uint8_t u8Vector,
5638 uint32_t fFlags,
5639 uint16_t uErr,
5640 uint64_t uCr2)
5641{
5642 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5643 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5644}
5645#endif
5646
5647
5648/** \#DE - 00. */
5649DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5650{
5651 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5652}
5653
5654
5655/** \#DB - 01.
5656 * @note This automatically clears DR7.GD. */
5657DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5658{
5659 /** @todo set/clear RF. */
5660 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5661 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5662}
5663
5664
5665/** \#BR - 05. */
5666DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5667{
5668 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5669}
5670
5671
5672/** \#UD - 06. */
5673DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5674{
5675 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5676}
5677
5678
5679/** \#NM - 07. */
5680DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5681{
5682 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5683}
5684
5685
5686/** \#TS(err) - 0a. */
5687DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5688{
5689 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5690}
5691
5692
5693/** \#TS(tr) - 0a. */
5694DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5695{
5696 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5697 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5698}
5699
5700
5701/** \#TS(0) - 0a. */
5702DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5703{
5704 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5705 0, 0);
5706}
5707
5708
5709/** \#TS(err) - 0a. */
5710DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5711{
5712 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5713 uSel & X86_SEL_MASK_OFF_RPL, 0);
5714}
5715
5716
5717/** \#NP(err) - 0b. */
5718DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5719{
5720 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5721}
5722
5723
5724/** \#NP(sel) - 0b. */
5725DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5726{
5727 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5728 uSel & ~X86_SEL_RPL, 0);
5729}
5730
5731
5732/** \#SS(seg) - 0c. */
5733DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5734{
5735 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5736 uSel & ~X86_SEL_RPL, 0);
5737}
5738
5739
5740/** \#SS(err) - 0c. */
5741DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5742{
5743 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5744}
5745
5746
5747/** \#GP(n) - 0d. */
5748DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5749{
5750 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5751}
5752
5753
5754/** \#GP(0) - 0d. */
5755DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5756{
5757 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5758}
5759
5760#ifdef IEM_WITH_SETJMP
5761/** \#GP(0) - 0d. */
5762DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5763{
5764 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5765}
5766#endif
5767
5768
5769/** \#GP(sel) - 0d. */
5770DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5771{
5772 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5773 Sel & ~X86_SEL_RPL, 0);
5774}
5775
5776
5777/** \#GP(0) - 0d. */
5778DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5779{
5780 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5781}
5782
5783
5784/** \#GP(sel) - 0d. */
5785DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5786{
5787 NOREF(iSegReg); NOREF(fAccess);
5788 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5789 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5790}
5791
5792#ifdef IEM_WITH_SETJMP
5793/** \#GP(sel) - 0d, longjmp. */
5794DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5795{
5796 NOREF(iSegReg); NOREF(fAccess);
5797 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5798 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5799}
5800#endif
5801
5802/** \#GP(sel) - 0d. */
5803DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5804{
5805 NOREF(Sel);
5806 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5807}
5808
5809#ifdef IEM_WITH_SETJMP
5810/** \#GP(sel) - 0d, longjmp. */
5811DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5812{
5813 NOREF(Sel);
5814 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5815}
5816#endif
5817
5818
5819/** \#GP(sel) - 0d. */
5820DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5821{
5822 NOREF(iSegReg); NOREF(fAccess);
5823 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5824}
5825
5826#ifdef IEM_WITH_SETJMP
5827/** \#GP(sel) - 0d, longjmp. */
5828DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5829 uint32_t fAccess)
5830{
5831 NOREF(iSegReg); NOREF(fAccess);
5832 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5833}
5834#endif
5835
5836
5837/** \#PF(n) - 0e. */
5838DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5839{
5840 uint16_t uErr;
5841 switch (rc)
5842 {
5843 case VERR_PAGE_NOT_PRESENT:
5844 case VERR_PAGE_TABLE_NOT_PRESENT:
5845 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5846 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5847 uErr = 0;
5848 break;
5849
5850 default:
5851 AssertMsgFailed(("%Rrc\n", rc));
5852 /* fall thru */
5853 case VERR_ACCESS_DENIED:
5854 uErr = X86_TRAP_PF_P;
5855 break;
5856
5857 /** @todo reserved */
5858 }
5859
5860 if (pVCpu->iem.s.uCpl == 3)
5861 uErr |= X86_TRAP_PF_US;
5862
5863 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5864 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5865 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5866 uErr |= X86_TRAP_PF_ID;
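    /* For reference, the #PF error code bits assembled here: P (bit 0) means
       a protection violation rather than a not-present page, U/S (bit 2)
       marks a CPL-3 access, I/D (bit 4) marks an instruction fetch (only set
       above when PAE and NXE are enabled), and W/R (bit 1) marks a write
       access (handled below). */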
5867
5868#if 0 /* This is so much non-sense, really. Why was it done like that? */
5869    /* Note! RW access callers reporting a WRITE protection fault will clear
5870 the READ flag before calling. So, read-modify-write accesses (RW)
5871 can safely be reported as READ faults. */
5872 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5873 uErr |= X86_TRAP_PF_RW;
5874#else
5875 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5876 {
5877 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5878 uErr |= X86_TRAP_PF_RW;
5879 }
5880#endif
5881
5882 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5883 uErr, GCPtrWhere);
5884}
5885
5886#ifdef IEM_WITH_SETJMP
5887/** \#PF(n) - 0e, longjmp. */
5888IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5889{
5890 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5891}
5892#endif
5893
5894
5895/** \#MF(0) - 10. */
5896DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5897{
5898 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5899}
5900
5901
5902/** \#AC(0) - 11. */
5903DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5904{
5905 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5906}
5907
5908
5909/**
5910 * Macro for calling iemCImplRaiseDivideError().
5911 *
5912 * This enables us to add/remove arguments and force different levels of
5913 * inlining as we wish.
5914 *
5915 * @return Strict VBox status code.
5916 */
5917#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5918IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5919{
5920 NOREF(cbInstr);
5921 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5922}
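/* Usage sketch (hypothetical opcode handler, for illustration only - not a
 * decoder function from this file):
 *
 *     FNIEMOP_DEF(iemOp_ExampleDiv)
 *     {
 *         if (bImmDivisor == 0)                   // hypothetical zero-divisor check
 *             return IEMOP_RAISE_DIVIDE_ERROR();  // defers to iemCImplRaiseDivideError
 *         ...
 *     }
 */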
5923
5924
5925/**
5926 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5927 *
5928 * This enables us to add/remove arguments and force different levels of
5929 * inlining as we wish.
5930 *
5931 * @return Strict VBox status code.
5932 */
5933#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5934IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5935{
5936 NOREF(cbInstr);
5937 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5938}
5939
5940
5941/**
5942 * Macro for calling iemCImplRaiseInvalidOpcode().
5943 *
5944 * This enables us to add/remove arguments and force different levels of
5945 * inlining as we wish.
5946 *
5947 * @return Strict VBox status code.
5948 */
5949#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5950IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5951{
5952 NOREF(cbInstr);
5953 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5954}
5955
5956
5957/** @} */
5958
5959
5960/*
5961 *
5962 * Helper routines.
5963 * Helper routines.
5964 * Helper routines.
5965 *
5966 */
5967
5968/**
5969 * Recalculates the effective operand size.
5970 *
5971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5972 */
5973IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5974{
5975 switch (pVCpu->iem.s.enmCpuMode)
5976 {
5977 case IEMMODE_16BIT:
5978 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5979 break;
5980 case IEMMODE_32BIT:
5981 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5982 break;
5983 case IEMMODE_64BIT:
5984 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5985 {
5986 case 0:
5987 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5988 break;
5989 case IEM_OP_PRF_SIZE_OP:
5990 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5991 break;
5992 case IEM_OP_PRF_SIZE_REX_W:
5993 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5994 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5995 break;
5996 }
5997 break;
5998 default:
5999 AssertFailed();
6000 }
6001}
6002
6003
6004/**
6005 * Sets the default operand size to 64-bit and recalculates the effective
6006 * operand size.
6007 *
6008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6009 */
6010IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6011{
6012 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6013 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
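    /* Illustrative note: this helper is meant for instructions whose operand
       size defaults to 64-bit in long mode (e.g. near branches, PUSH/POP).
       A 66h prefix alone selects 16-bit operands, while REX.W yields 64-bit
       even when 66h is also present, which is what the test below encodes. */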
6014 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6015 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6016 else
6017 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6018}
6019
6020
6021/*
6022 *
6023 * Common opcode decoders.
6024 * Common opcode decoders.
6025 * Common opcode decoders.
6026 *
6027 */
6028//#include <iprt/mem.h>
6029
6030/**
6031 * Used to add extra details about a stub case.
6032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6033 */
6034IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6035{
6036#if defined(LOG_ENABLED) && defined(IN_RING3)
6037 PVM pVM = pVCpu->CTX_SUFF(pVM);
6038 char szRegs[4096];
6039 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6040 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6041 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6042 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6043 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6044 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6045 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6046 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6047 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6048 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6049 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6050 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6051 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6052 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6053 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6054 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6055 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6056 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6057 " efer=%016VR{efer}\n"
6058 " pat=%016VR{pat}\n"
6059 " sf_mask=%016VR{sf_mask}\n"
6060 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6061 " lstar=%016VR{lstar}\n"
6062 " star=%016VR{star} cstar=%016VR{cstar}\n"
6063 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6064 );
6065
6066 char szInstr[256];
6067 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6068 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6069 szInstr, sizeof(szInstr), NULL);
6070
6071 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6072#else
6073    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
6074#endif
6075}
6076
6077/**
6078 * Complains about a stub.
6079 *
6080 * Providing two versions of this macro, one for daily use and one for use when
6081 * working on IEM.
6082 */
6083#if 0
6084# define IEMOP_BITCH_ABOUT_STUB() \
6085 do { \
6086 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6087 iemOpStubMsg2(pVCpu); \
6088 RTAssertPanic(); \
6089 } while (0)
6090#else
6091# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6092#endif
6093
6094/** Stubs an opcode. */
6095#define FNIEMOP_STUB(a_Name) \
6096 FNIEMOP_DEF(a_Name) \
6097 { \
6098 RT_NOREF_PV(pVCpu); \
6099 IEMOP_BITCH_ABOUT_STUB(); \
6100 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6101 } \
6102 typedef int ignore_semicolon
6103
6104/** Stubs an opcode. */
6105#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6106 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6107 { \
6108 RT_NOREF_PV(pVCpu); \
6109 RT_NOREF_PV(a_Name0); \
6110 IEMOP_BITCH_ABOUT_STUB(); \
6111 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6112 } \
6113 typedef int ignore_semicolon
6114
6115/** Stubs an opcode which currently should raise \#UD. */
6116#define FNIEMOP_UD_STUB(a_Name) \
6117 FNIEMOP_DEF(a_Name) \
6118 { \
6119 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6120 return IEMOP_RAISE_INVALID_OPCODE(); \
6121 } \
6122 typedef int ignore_semicolon
6123
6124/** Stubs an opcode which currently should raise \#UD. */
6125#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6126 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6127 { \
6128 RT_NOREF_PV(pVCpu); \
6129 RT_NOREF_PV(a_Name0); \
6130 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6131 return IEMOP_RAISE_INVALID_OPCODE(); \
6132 } \
6133 typedef int ignore_semicolon
6134
6135
6136
6137/** @name Register Access.
6138 * @{
6139 */
6140
6141/**
6142 * Gets a reference (pointer) to the specified hidden segment register.
6143 *
6144 * @returns Hidden register reference.
6145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6146 * @param iSegReg The segment register.
6147 */
6148IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6149{
6150 Assert(iSegReg < X86_SREG_COUNT);
6151 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6152 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6153
6154#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6155 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6156 { /* likely */ }
6157 else
6158 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6159#else
6160 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6161#endif
6162 return pSReg;
6163}
6164
6165
6166/**
6167 * Ensures that the given hidden segment register is up to date.
6168 *
6169 * @returns Hidden register reference.
6170 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6171 * @param pSReg The segment register.
6172 */
6173IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6174{
6175#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6176 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6177 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6178#else
6179 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6180 NOREF(pVCpu);
6181#endif
6182 return pSReg;
6183}
6184
6185
6186/**
6187 * Gets a reference (pointer) to the specified segment register (the selector
6188 * value).
6189 *
6190 * @returns Pointer to the selector variable.
6191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6192 * @param iSegReg The segment register.
6193 */
6194DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6195{
6196 Assert(iSegReg < X86_SREG_COUNT);
6197 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6198 return &pCtx->aSRegs[iSegReg].Sel;
6199}
6200
6201
6202/**
6203 * Fetches the selector value of a segment register.
6204 *
6205 * @returns The selector value.
6206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6207 * @param iSegReg The segment register.
6208 */
6209DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6210{
6211 Assert(iSegReg < X86_SREG_COUNT);
6212 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6213}
6214
6215
6216/**
6217 * Gets a reference (pointer) to the specified general purpose register.
6218 *
6219 * @returns Register reference.
6220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6221 * @param iReg The general purpose register.
6222 */
6223DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6224{
6225 Assert(iReg < 16);
6226 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6227 return &pCtx->aGRegs[iReg];
6228}
6229
6230
6231/**
6232 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6233 *
6234 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6235 *
6236 * @returns Register reference.
6237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6238 * @param iReg The register.
6239 */
6240DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6241{
6242 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6243 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6244 {
6245 Assert(iReg < 16);
6246 return &pCtx->aGRegs[iReg].u8;
6247 }
6248 /* high 8-bit register. */
6249 Assert(iReg < 8);
6250 return &pCtx->aGRegs[iReg & 3].bHi;
6251}
6252
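/* Example (informative): without a REX prefix, encodings 4..7 select the
 * legacy high byte registers, so iReg=4 resolves to AH (&aGRegs[0].bHi) and
 * iReg=7 to BH (&aGRegs[3].bHi); with any REX prefix present they resolve to
 * SPL..DIL via the first branch instead. */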
6253
6254/**
6255 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6256 *
6257 * @returns Register reference.
6258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6259 * @param iReg The register.
6260 */
6261DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6262{
6263 Assert(iReg < 16);
6264 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6265 return &pCtx->aGRegs[iReg].u16;
6266}
6267
6268
6269/**
6270 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6271 *
6272 * @returns Register reference.
6273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6274 * @param iReg The register.
6275 */
6276DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6277{
6278 Assert(iReg < 16);
6279 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6280 return &pCtx->aGRegs[iReg].u32;
6281}
6282
6283
6284/**
6285 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6286 *
6287 * @returns Register reference.
6288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6289 * @param iReg The register.
6290 */
6291DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6292{
6293 Assert(iReg < 16);
6294 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6295 return &pCtx->aGRegs[iReg].u64;
6296}
6297
6298
6299/**
6300 * Fetches the value of an 8-bit general purpose register.
6301 *
6302 * @returns The register value.
6303 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6304 * @param iReg The register.
6305 */
6306DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6307{
6308 return *iemGRegRefU8(pVCpu, iReg);
6309}
6310
6311
6312/**
6313 * Fetches the value of a 16-bit general purpose register.
6314 *
6315 * @returns The register value.
6316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6317 * @param iReg The register.
6318 */
6319DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6320{
6321 Assert(iReg < 16);
6322 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6323}
6324
6325
6326/**
6327 * Fetches the value of a 32-bit general purpose register.
6328 *
6329 * @returns The register value.
6330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6331 * @param iReg The register.
6332 */
6333DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6334{
6335 Assert(iReg < 16);
6336 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6337}
6338
6339
6340/**
6341 * Fetches the value of a 64-bit general purpose register.
6342 *
6343 * @returns The register value.
6344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6345 * @param iReg The register.
6346 */
6347DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6348{
6349 Assert(iReg < 16);
6350 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6351}
6352
6353
6354/**
6355 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6356 *
6357 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6358 * segment limit.
6359 *
6360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6361 * @param offNextInstr The offset of the next instruction.
6362 */
6363IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6364{
6365 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6366 switch (pVCpu->iem.s.enmEffOpSize)
6367 {
6368 case IEMMODE_16BIT:
6369 {
6370 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6371 if ( uNewIp > pCtx->cs.u32Limit
6372 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6373 return iemRaiseGeneralProtectionFault0(pVCpu);
6374 pCtx->rip = uNewIp;
6375 break;
6376 }
6377
6378 case IEMMODE_32BIT:
6379 {
6380 Assert(pCtx->rip <= UINT32_MAX);
6381 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6382
6383 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6384 if (uNewEip > pCtx->cs.u32Limit)
6385 return iemRaiseGeneralProtectionFault0(pVCpu);
6386 pCtx->rip = uNewEip;
6387 break;
6388 }
6389
6390 case IEMMODE_64BIT:
6391 {
6392 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6393
6394 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6395 if (!IEM_IS_CANONICAL(uNewRip))
6396 return iemRaiseGeneralProtectionFault0(pVCpu);
6397 pCtx->rip = uNewRip;
6398 break;
6399 }
6400
6401 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6402 }
6403
6404 pCtx->eflags.Bits.u1RF = 0;
6405
6406#ifndef IEM_WITH_CODE_TLB
6407 /* Flush the prefetch buffer. */
6408 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6409#endif
6410
6411 return VINF_SUCCESS;
6412}
6413
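/* Worked example (informative): in 16-bit code a two byte short jump at
 * IP=0xfff0 with offNextInstr=+0x20 gives uNewIp = 0xfff0 + 0x20 + 2 = 0x0012
 * (the uint16_t arithmetic wraps), which is then checked against the CS limit
 * before being committed to RIP. */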
6414
6415/**
6416 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6417 *
6418 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6419 * segment limit.
6420 *
6421 * @returns Strict VBox status code.
6422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6423 * @param offNextInstr The offset of the next instruction.
6424 */
6425IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6426{
6427 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6428 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6429
6430 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6431 if ( uNewIp > pCtx->cs.u32Limit
6432 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6433 return iemRaiseGeneralProtectionFault0(pVCpu);
6434 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6435 pCtx->rip = uNewIp;
6436 pCtx->eflags.Bits.u1RF = 0;
6437
6438#ifndef IEM_WITH_CODE_TLB
6439 /* Flush the prefetch buffer. */
6440 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6441#endif
6442
6443 return VINF_SUCCESS;
6444}
6445
6446
6447/**
6448 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6449 *
6450 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6451 * segment limit.
6452 *
6453 * @returns Strict VBox status code.
6454 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6455 * @param offNextInstr The offset of the next instruction.
6456 */
6457IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6458{
6459 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6460 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6461
6462 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6463 {
6464 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6465
6466 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6467 if (uNewEip > pCtx->cs.u32Limit)
6468 return iemRaiseGeneralProtectionFault0(pVCpu);
6469 pCtx->rip = uNewEip;
6470 }
6471 else
6472 {
6473 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6474
6475 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6476 if (!IEM_IS_CANONICAL(uNewRip))
6477 return iemRaiseGeneralProtectionFault0(pVCpu);
6478 pCtx->rip = uNewRip;
6479 }
6480 pCtx->eflags.Bits.u1RF = 0;
6481
6482#ifndef IEM_WITH_CODE_TLB
6483 /* Flush the prefetch buffer. */
6484 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6485#endif
6486
6487 return VINF_SUCCESS;
6488}
6489
6490
6491/**
6492 * Performs a near jump to the specified address.
6493 *
6494 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6495 * segment limit.
6496 *
6497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6498 * @param uNewRip The new RIP value.
6499 */
6500IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6501{
6502 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6503 switch (pVCpu->iem.s.enmEffOpSize)
6504 {
6505 case IEMMODE_16BIT:
6506 {
6507 Assert(uNewRip <= UINT16_MAX);
6508 if ( uNewRip > pCtx->cs.u32Limit
6509 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6510 return iemRaiseGeneralProtectionFault0(pVCpu);
6511 /** @todo Test 16-bit jump in 64-bit mode. */
6512 pCtx->rip = uNewRip;
6513 break;
6514 }
6515
6516 case IEMMODE_32BIT:
6517 {
6518 Assert(uNewRip <= UINT32_MAX);
6519 Assert(pCtx->rip <= UINT32_MAX);
6520 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6521
6522 if (uNewRip > pCtx->cs.u32Limit)
6523 return iemRaiseGeneralProtectionFault0(pVCpu);
6524 pCtx->rip = uNewRip;
6525 break;
6526 }
6527
6528 case IEMMODE_64BIT:
6529 {
6530 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6531
6532 if (!IEM_IS_CANONICAL(uNewRip))
6533 return iemRaiseGeneralProtectionFault0(pVCpu);
6534 pCtx->rip = uNewRip;
6535 break;
6536 }
6537
6538 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6539 }
6540
6541 pCtx->eflags.Bits.u1RF = 0;
6542
6543#ifndef IEM_WITH_CODE_TLB
6544 /* Flush the prefetch buffer. */
6545 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6546#endif
6547
6548 return VINF_SUCCESS;
6549}
6550
6551
6552/**
6553 * Get the address of the top of the stack.
6554 *
6555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6556 * @param pCtx The CPU context from which SP/ESP/RSP should be
6557 * read.
6558 */
6559DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6560{
6561 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6562 return pCtx->rsp;
6563 if (pCtx->ss.Attr.n.u1DefBig)
6564 return pCtx->esp;
6565 return pCtx->sp;
6566}
6567
6568
6569/**
6570 * Updates the RIP/EIP/IP to point to the next instruction.
6571 *
6572 * This function leaves the EFLAGS.RF flag alone.
6573 *
6574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6575 * @param cbInstr The number of bytes to add.
6576 */
6577IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6578{
6579 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6580 switch (pVCpu->iem.s.enmCpuMode)
6581 {
6582 case IEMMODE_16BIT:
6583 Assert(pCtx->rip <= UINT16_MAX);
6584 pCtx->eip += cbInstr;
6585 pCtx->eip &= UINT32_C(0xffff);
6586 break;
6587
6588 case IEMMODE_32BIT:
6589 pCtx->eip += cbInstr;
6590 Assert(pCtx->rip <= UINT32_MAX);
6591 break;
6592
6593 case IEMMODE_64BIT:
6594 pCtx->rip += cbInstr;
6595 break;
6596 default: AssertFailed();
6597 }
6598}
6599
6600
6601#if 0
6602/**
6603 * Updates the RIP/EIP/IP to point to the next instruction.
6604 *
6605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6606 */
6607IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6608{
6609 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6610}
6611#endif
6612
6613
6614
6615/**
6616 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6617 *
6618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6619 * @param cbInstr The number of bytes to add.
6620 */
6621IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6622{
6623 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6624
6625 pCtx->eflags.Bits.u1RF = 0;
6626
6627 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6628#if ARCH_BITS >= 64
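    /* Mask table indexed by IEMMODE (16/32/64-bit); it wraps the advanced RIP
       to 16, 32 or 64 bits respectively. */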
6629 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6630 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6631 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6632#else
6633 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6634 pCtx->rip += cbInstr;
6635 else
6636 {
6637 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6638 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6639 }
6640#endif
6641}
6642
6643
6644/**
6645 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6646 *
6647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6648 */
6649IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6650{
6651 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6652}
6653
6654
6655/**
6656 * Adds to the stack pointer.
6657 *
6658 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6659 * @param pCtx The CPU context in which SP/ESP/RSP should be
6660 * updated.
6661 * @param cbToAdd The number of bytes to add (8-bit!).
6662 */
6663DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6664{
6665 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6666 pCtx->rsp += cbToAdd;
6667 else if (pCtx->ss.Attr.n.u1DefBig)
6668 pCtx->esp += cbToAdd;
6669 else
6670 pCtx->sp += cbToAdd;
6671}
6672
6673
6674/**
6675 * Subtracts from the stack pointer.
6676 *
6677 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6678 * @param pCtx The CPU context in which SP/ESP/RSP should be
6679 * updated.
6680 * @param cbToSub The number of bytes to subtract (8-bit!).
6681 */
6682DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6683{
6684 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6685 pCtx->rsp -= cbToSub;
6686 else if (pCtx->ss.Attr.n.u1DefBig)
6687 pCtx->esp -= cbToSub;
6688 else
6689 pCtx->sp -= cbToSub;
6690}
6691
6692
6693/**
6694 * Adds to the temporary stack pointer.
6695 *
6696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6697 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6698 * @param cbToAdd The number of bytes to add (16-bit).
6699 * @param pCtx Where to get the current stack mode.
6700 */
6701DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6702{
6703 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6704 pTmpRsp->u += cbToAdd;
6705 else if (pCtx->ss.Attr.n.u1DefBig)
6706 pTmpRsp->DWords.dw0 += cbToAdd;
6707 else
6708 pTmpRsp->Words.w0 += cbToAdd;
6709}
6710
6711
6712/**
6713 * Subtracts from the temporary stack pointer.
6714 *
6715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6716 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6717 * @param cbToSub The number of bytes to subtract.
6718 * @param pCtx Where to get the current stack mode.
6719 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6720 * expecting that.
6721 */
6722DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6723{
6724 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6725 pTmpRsp->u -= cbToSub;
6726 else if (pCtx->ss.Attr.n.u1DefBig)
6727 pTmpRsp->DWords.dw0 -= cbToSub;
6728 else
6729 pTmpRsp->Words.w0 -= cbToSub;
6730}
6731
6732
6733/**
6734 * Calculates the effective stack address for a push of the specified size as
6735 * well as the new RSP value (upper bits may be masked).
6736 *
6737 * @returns Effective stack address for the push.
6738 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6739 * @param pCtx Where to get the current stack mode.
6740 * @param cbItem The size of the stack item to push.
6741 * @param puNewRsp Where to return the new RSP value.
6742 */
6743DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6744{
6745 RTUINT64U uTmpRsp;
6746 RTGCPTR GCPtrTop;
6747 uTmpRsp.u = pCtx->rsp;
6748
6749 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6750 GCPtrTop = uTmpRsp.u -= cbItem;
6751 else if (pCtx->ss.Attr.n.u1DefBig)
6752 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6753 else
6754 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6755 *puNewRsp = uTmpRsp.u;
6756 return GCPtrTop;
6757}
6758
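/* Example (informative): with a 16-bit stack (SS.B=0) and SP=0x0002, pushing a
 * four byte item only touches the low word, so both the returned address and
 * the new SP are 0xfffe while the upper bits of RSP are left untouched. */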
6759
6760/**
6761 * Gets the current stack pointer and calculates the value after a pop of the
6762 * specified size.
6763 *
6764 * @returns Current stack pointer.
6765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6766 * @param pCtx Where to get the current stack mode.
6767 * @param cbItem The size of the stack item to pop.
6768 * @param puNewRsp Where to return the new RSP value.
6769 */
6770DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6771{
6772 RTUINT64U uTmpRsp;
6773 RTGCPTR GCPtrTop;
6774 uTmpRsp.u = pCtx->rsp;
6775
6776 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6777 {
6778 GCPtrTop = uTmpRsp.u;
6779 uTmpRsp.u += cbItem;
6780 }
6781 else if (pCtx->ss.Attr.n.u1DefBig)
6782 {
6783 GCPtrTop = uTmpRsp.DWords.dw0;
6784 uTmpRsp.DWords.dw0 += cbItem;
6785 }
6786 else
6787 {
6788 GCPtrTop = uTmpRsp.Words.w0;
6789 uTmpRsp.Words.w0 += cbItem;
6790 }
6791 *puNewRsp = uTmpRsp.u;
6792 return GCPtrTop;
6793}
6794
6795
6796/**
6797 * Calculates the effective stack address for a push of the specified size as
6798 * well as the new temporary RSP value (upper bits may be masked).
6799 *
6800 * @returns Effective stack address for the push.
6801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6802 * @param pCtx Where to get the current stack mode.
6803 * @param pTmpRsp The temporary stack pointer. This is updated.
6804 * @param cbItem The size of the stack item to push.
6805 */
6806DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6807{
6808 RTGCPTR GCPtrTop;
6809
6810 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6811 GCPtrTop = pTmpRsp->u -= cbItem;
6812 else if (pCtx->ss.Attr.n.u1DefBig)
6813 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6814 else
6815 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6816 return GCPtrTop;
6817}
6818
6819
6820/**
6821 * Gets the effective stack address for a pop of the specified size and
6822 * calculates and updates the temporary RSP.
6823 *
6824 * @returns Current stack pointer.
6825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6826 * @param pCtx Where to get the current stack mode.
6827 * @param pTmpRsp The temporary stack pointer. This is updated.
6828 * @param cbItem The size of the stack item to pop.
6829 */
6830DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6831{
6832 RTGCPTR GCPtrTop;
6833 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6834 {
6835 GCPtrTop = pTmpRsp->u;
6836 pTmpRsp->u += cbItem;
6837 }
6838 else if (pCtx->ss.Attr.n.u1DefBig)
6839 {
6840 GCPtrTop = pTmpRsp->DWords.dw0;
6841 pTmpRsp->DWords.dw0 += cbItem;
6842 }
6843 else
6844 {
6845 GCPtrTop = pTmpRsp->Words.w0;
6846 pTmpRsp->Words.w0 += cbItem;
6847 }
6848 return GCPtrTop;
6849}
6850
6851/** @} */
6852
6853
6854/** @name FPU access and helpers.
6855 *
6856 * @{
6857 */
6858
6859
6860/**
6861 * Hook for preparing to use the host FPU.
6862 *
6863 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6864 *
6865 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6866 */
6867DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6868{
6869#ifdef IN_RING3
6870 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6871#else
6872 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6873#endif
6874}
6875
6876
6877/**
6878 * Hook for preparing to use the host FPU for SSE.
6879 *
6880 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6881 *
6882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6883 */
6884DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6885{
6886 iemFpuPrepareUsage(pVCpu);
6887}
6888
6889
6890/**
6891 * Hook for actualizing the guest FPU state before the interpreter reads it.
6892 *
6893 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6894 *
6895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6896 */
6897DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6898{
6899#ifdef IN_RING3
6900 NOREF(pVCpu);
6901#else
6902 CPUMRZFpuStateActualizeForRead(pVCpu);
6903#endif
6904}
6905
6906
6907/**
6908 * Hook for actualizing the guest FPU state before the interpreter changes it.
6909 *
6910 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6911 *
6912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6913 */
6914DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6915{
6916#ifdef IN_RING3
6917 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6918#else
6919 CPUMRZFpuStateActualizeForChange(pVCpu);
6920#endif
6921}
6922
6923
6924/**
6925 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6926 * only.
6927 *
6928 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6929 *
6930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6931 */
6932DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6933{
6934#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6935 NOREF(pVCpu);
6936#else
6937 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6938#endif
6939}
6940
6941
6942/**
6943 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6944 * read+write.
6945 *
6946 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6947 *
6948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6949 */
6950DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6951{
6952#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6953 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6954#else
6955 CPUMRZFpuStateActualizeForChange(pVCpu);
6956#endif
6957}
6958
6959
6960/**
6961 * Stores a QNaN value into a FPU register.
6962 *
6963 * @param pReg Pointer to the register.
6964 */
6965DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6966{
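    /* Builds the x87 "QNaN floating-point indefinite": sign=1, exponent all
       ones, mantissa 0xc000000000000000. */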
6967 pReg->au32[0] = UINT32_C(0x00000000);
6968 pReg->au32[1] = UINT32_C(0xc0000000);
6969 pReg->au16[4] = UINT16_C(0xffff);
6970}
6971
6972
6973/**
6974 * Updates the FOP, FPU.CS and FPUIP registers.
6975 *
6976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6977 * @param pCtx The CPU context.
6978 * @param pFpuCtx The FPU context.
6979 */
6980DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6981{
6982 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6983 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6984 /** @todo x87.CS and FPUIP need to be kept separately. */
6985 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6986 {
6987 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
6988 * happens in real mode here based on the fnsave and fnstenv images. */
6989 pFpuCtx->CS = 0;
6990 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6991 }
6992 else
6993 {
6994 pFpuCtx->CS = pCtx->cs.Sel;
6995 pFpuCtx->FPUIP = pCtx->rip;
6996 }
6997}
6998
6999
7000/**
7001 * Updates the x87.DS and FPUDP registers.
7002 *
7003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7004 * @param pCtx The CPU context.
7005 * @param pFpuCtx The FPU context.
7006 * @param iEffSeg The effective segment register.
7007 * @param GCPtrEff The effective address relative to @a iEffSeg.
7008 */
7009DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7010{
7011 RTSEL sel;
7012 switch (iEffSeg)
7013 {
7014 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
7015 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
7016 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
7017 case X86_SREG_ES: sel = pCtx->es.Sel; break;
7018 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
7019 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
7020 default:
7021 AssertMsgFailed(("%d\n", iEffSeg));
7022 sel = pCtx->ds.Sel;
7023 }
7024 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7025 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7026 {
7027 pFpuCtx->DS = 0;
7028 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7029 }
7030 else
7031 {
7032 pFpuCtx->DS = sel;
7033 pFpuCtx->FPUDP = GCPtrEff;
7034 }
7035}
7036
7037
7038/**
7039 * Rotates the stack registers in the push direction.
7040 *
7041 * @param pFpuCtx The FPU context.
7042 * @remarks This is a complete waste of time, but fxsave stores the registers in
7043 * stack order.
7044 */
7045DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7046{
7047 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7048 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7049 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7050 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7051 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7052 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7053 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7054 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7055 pFpuCtx->aRegs[0].r80 = r80Tmp;
7056}
7057
7058
7059/**
7060 * Rotates the stack registers in the pop direction.
7061 *
7062 * @param pFpuCtx The FPU context.
7063 * @remarks This is a complete waste of time, but fxsave stores the registers in
7064 * stack order.
7065 */
7066DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7067{
7068 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7069 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7070 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7071 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7072 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7073 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7074 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7075 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7076 pFpuCtx->aRegs[7].r80 = r80Tmp;
7077}
7078
7079
7080/**
7081 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7082 * exception prevents it.
7083 *
7084 * @param pResult The FPU operation result to push.
7085 * @param pFpuCtx The FPU context.
7086 */
7087IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7088{
7089 /* Update FSW and bail if there are pending exceptions afterwards. */
7090 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7091 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7092 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7093 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7094 {
7095 pFpuCtx->FSW = fFsw;
7096 return;
7097 }
7098
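    /* A push decrements TOP; adding 7 is the same as subtracting 1 modulo 8. */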
7099 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7100 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7101 {
7102 /* All is fine, push the actual value. */
7103 pFpuCtx->FTW |= RT_BIT(iNewTop);
7104 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7105 }
7106 else if (pFpuCtx->FCW & X86_FCW_IM)
7107 {
7108 /* Masked stack overflow, push QNaN. */
7109 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7110 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7111 }
7112 else
7113 {
7114 /* Raise stack overflow, don't push anything. */
7115 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7116 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7117 return;
7118 }
7119
7120 fFsw &= ~X86_FSW_TOP_MASK;
7121 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7122 pFpuCtx->FSW = fFsw;
7123
7124 iemFpuRotateStackPush(pFpuCtx);
7125}
7126
7127
7128/**
7129 * Stores a result in a FPU register and updates the FSW and FTW.
7130 *
7131 * @param pFpuCtx The FPU context.
7132 * @param pResult The result to store.
7133 * @param iStReg Which FPU register to store it in.
7134 */
7135IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7136{
7137 Assert(iStReg < 8);
7138 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7139 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7140 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7141 pFpuCtx->FTW |= RT_BIT(iReg);
7142 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7143}
7144
7145
7146/**
7147 * Only updates the FPU status word (FSW) with the result of the current
7148 * instruction.
7149 *
7150 * @param pFpuCtx The FPU context.
7151 * @param u16FSW The FSW output of the current instruction.
7152 */
7153IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7154{
7155 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7156 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7157}
7158
7159
7160/**
7161 * Pops one item off the FPU stack if no pending exception prevents it.
7162 *
7163 * @param pFpuCtx The FPU context.
7164 */
7165IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7166{
7167 /* Check pending exceptions. */
7168 uint16_t uFSW = pFpuCtx->FSW;
7169 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7170 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7171 return;
7172
7173 /* TOP++ (pop); the +9 below is +1 modulo 8 in the TOP field. */
7174 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7175 uFSW &= ~X86_FSW_TOP_MASK;
7176 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7177 pFpuCtx->FSW = uFSW;
7178
7179 /* Mark the previous ST0 as empty. */
7180 iOldTop >>= X86_FSW_TOP_SHIFT;
7181 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7182
7183 /* Rotate the registers. */
7184 iemFpuRotateStackPop(pFpuCtx);
7185}
7186
7187
7188/**
7189 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7190 *
7191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7192 * @param pResult The FPU operation result to push.
7193 */
7194IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7195{
7196 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7197 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7198 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7199 iemFpuMaybePushResult(pResult, pFpuCtx);
7200}
7201
7202
7203/**
7204 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7205 * and sets FPUDP and FPUDS.
7206 *
7207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7208 * @param pResult The FPU operation result to push.
7209 * @param iEffSeg The effective segment register.
7210 * @param GCPtrEff The effective address relative to @a iEffSeg.
7211 */
7212IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7213{
7214 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7215 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7216 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7217 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7218 iemFpuMaybePushResult(pResult, pFpuCtx);
7219}
7220
7221
7222/**
7223 * Replace ST0 with the first value and push the second onto the FPU stack,
7224 * unless a pending exception prevents it.
7225 *
7226 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7227 * @param pResult The FPU operation result to store and push.
7228 */
7229IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7230{
7231 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7232 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7233 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7234
7235 /* Update FSW and bail if there are pending exceptions afterwards. */
7236 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7237 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7238 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7239 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7240 {
7241 pFpuCtx->FSW = fFsw;
7242 return;
7243 }
7244
7245 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7246 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7247 {
7248 /* All is fine, push the actual value. */
7249 pFpuCtx->FTW |= RT_BIT(iNewTop);
7250 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7251 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7252 }
7253 else if (pFpuCtx->FCW & X86_FCW_IM)
7254 {
7255 /* Masked stack overflow, push QNaN. */
7256 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7257 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7258 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7259 }
7260 else
7261 {
7262 /* Raise stack overflow, don't push anything. */
7263 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7264 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7265 return;
7266 }
7267
7268 fFsw &= ~X86_FSW_TOP_MASK;
7269 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7270 pFpuCtx->FSW = fFsw;
7271
7272 iemFpuRotateStackPush(pFpuCtx);
7273}
7274
7275
7276/**
7277 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7278 * FOP.
7279 *
7280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7281 * @param pResult The result to store.
7282 * @param iStReg Which FPU register to store it in.
7283 */
7284IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7285{
7286 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7287 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7288 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7289 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7290}
7291
7292
7293/**
7294 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7295 * FOP, and then pops the stack.
7296 *
7297 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7298 * @param pResult The result to store.
7299 * @param iStReg Which FPU register to store it in.
7300 */
7301IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7302{
7303 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7304 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7305 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7306 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7307 iemFpuMaybePopOne(pFpuCtx);
7308}
7309
7310
7311/**
7312 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7313 * FPUDP, and FPUDS.
7314 *
7315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7316 * @param pResult The result to store.
7317 * @param iStReg Which FPU register to store it in.
7318 * @param iEffSeg The effective memory operand selector register.
7319 * @param GCPtrEff The effective memory operand offset.
7320 */
7321IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7322 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7323{
7324 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7325 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7326 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7327 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7328 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7329}
7330
7331
7332/**
7333 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7334 * FPUDP, and FPUDS, and then pops the stack.
7335 *
7336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7337 * @param pResult The result to store.
7338 * @param iStReg Which FPU register to store it in.
7339 * @param iEffSeg The effective memory operand selector register.
7340 * @param GCPtrEff The effective memory operand offset.
7341 */
7342IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7343 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7344{
7345 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7346 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7347 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7348 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7349 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7350 iemFpuMaybePopOne(pFpuCtx);
7351}
7352
7353
7354/**
7355 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7356 *
7357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7358 */
7359IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7360{
7361 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7362 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7363 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7364}
7365
7366
7367/**
7368 * Marks the specified stack register as free (for FFREE).
7369 *
7370 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7371 * @param iStReg The register to free.
7372 */
7373IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7374{
7375 Assert(iStReg < 8);
7376 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7377 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7378 pFpuCtx->FTW &= ~RT_BIT(iReg);
7379}
7380
7381
7382/**
7383 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7384 *
7385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7386 */
7387IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7388{
7389 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7390 uint16_t uFsw = pFpuCtx->FSW;
7391 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7392 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7393 uFsw &= ~X86_FSW_TOP_MASK;
7394 uFsw |= uTop;
7395 pFpuCtx->FSW = uFsw;
7396}
7397
7398
7399/**
7400 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7401 *
7402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7403 */
7404IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7405{
7406 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7407 uint16_t uFsw = pFpuCtx->FSW;
7408 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7409 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7410 uFsw &= ~X86_FSW_TOP_MASK;
7411 uFsw |= uTop;
7412 pFpuCtx->FSW = uFsw;
7413}
7414
7415
7416/**
7417 * Updates the FSW, FOP, FPUIP, and FPUCS.
7418 *
7419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7420 * @param u16FSW The FSW from the current instruction.
7421 */
7422IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7423{
7424 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7425 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7426 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7427 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7428}
7429
7430
7431/**
7432 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7433 *
7434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7435 * @param u16FSW The FSW from the current instruction.
7436 */
7437IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7438{
7439 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7440 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7441 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7442 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7443 iemFpuMaybePopOne(pFpuCtx);
7444}
7445
7446
7447/**
7448 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7449 *
7450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7451 * @param u16FSW The FSW from the current instruction.
7452 * @param iEffSeg The effective memory operand selector register.
7453 * @param GCPtrEff The effective memory operand offset.
7454 */
7455IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7456{
7457 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7458 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7459 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7460 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7461 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7462}
7463
7464
7465/**
7466 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7467 *
7468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7469 * @param u16FSW The FSW from the current instruction.
7470 */
7471IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7472{
7473 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7474 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7475 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7476 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7477 iemFpuMaybePopOne(pFpuCtx);
7478 iemFpuMaybePopOne(pFpuCtx);
7479}
7480
7481
7482/**
7483 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7484 *
7485 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7486 * @param u16FSW The FSW from the current instruction.
7487 * @param iEffSeg The effective memory operand selector register.
7488 * @param GCPtrEff The effective memory operand offset.
7489 */
7490IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7491{
7492 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7493 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7494 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7495 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7496 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7497 iemFpuMaybePopOne(pFpuCtx);
7498}
7499
7500
7501/**
7502 * Worker routine for raising an FPU stack underflow exception.
7503 *
7504 * @param pFpuCtx The FPU context.
7505 * @param iStReg The stack register being accessed.
7506 */
7507IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7508{
7509 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7510 if (pFpuCtx->FCW & X86_FCW_IM)
7511 {
7512 /* Masked underflow. */
7513 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7514 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7515 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7516 if (iStReg != UINT8_MAX)
7517 {
7518 pFpuCtx->FTW |= RT_BIT(iReg);
7519 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7520 }
7521 }
7522 else
7523 {
7524 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7525 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7526 }
7527}
7528
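/* Note (informative): on a masked underflow the code above leaves C1 clear
 * (C1=0 indicates underflow, C1=1 overflow) and stores a QNaN indefinite in
 * the destination; when \#IS is unmasked it sets IE, SF, ES and B and leaves
 * the register stack untouched. */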
7529
7530/**
7531 * Raises a FPU stack underflow exception.
7532 *
7533 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7534 * @param iStReg The destination register that should be loaded
7535 * with QNaN if \#IS is masked. Specify
7536 * UINT8_MAX if none (like for fcom).
7537 */
7538DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7539{
7540 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7541 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7542 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7543 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7544}
7545
7546
7547DECL_NO_INLINE(IEM_STATIC, void)
7548iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7549{
7550 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7551 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7552 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7553 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7554 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7555}
7556
7557
7558DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7559{
7560 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7561 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7562 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7563 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7564 iemFpuMaybePopOne(pFpuCtx);
7565}
7566
7567
7568DECL_NO_INLINE(IEM_STATIC, void)
7569iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7570{
7571 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7572 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7573 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7574 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7575 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7576 iemFpuMaybePopOne(pFpuCtx);
7577}
7578
7579
7580DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7581{
7582 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7583 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7584 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7585 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7586 iemFpuMaybePopOne(pFpuCtx);
7587 iemFpuMaybePopOne(pFpuCtx);
7588}
7589
7590
7591DECL_NO_INLINE(IEM_STATIC, void)
7592iemFpuStackPushUnderflow(PVMCPU pVCpu)
7593{
7594 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7595 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7596 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7597
7598 if (pFpuCtx->FCW & X86_FCW_IM)
7599 {
7600 /* Masked underflow - push QNaN. */
7601 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7602 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7603 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7604 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7605 pFpuCtx->FTW |= RT_BIT(iNewTop);
7606 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7607 iemFpuRotateStackPush(pFpuCtx);
7608 }
7609 else
7610 {
7611 /* Exception pending - don't change TOP or the register stack. */
7612 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7613 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7614 }
7615}
7616
7617
7618DECL_NO_INLINE(IEM_STATIC, void)
7619iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7620{
7621 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7622 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7623 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7624
7625 if (pFpuCtx->FCW & X86_FCW_IM)
7626 {
7627 /* Masked underflow - push QNaN. */
7628 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7629 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7630 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7631 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7632 pFpuCtx->FTW |= RT_BIT(iNewTop);
7633 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7634 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7635 iemFpuRotateStackPush(pFpuCtx);
7636 }
7637 else
7638 {
7639 /* Exception pending - don't change TOP or the register stack. */
7640 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7641 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7642 }
7643}
7644
7645
7646/**
7647 * Worker routine for raising an FPU stack overflow exception on a push.
7648 *
7649 * @param pFpuCtx The FPU context.
7650 */
7651IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7652{
7653 if (pFpuCtx->FCW & X86_FCW_IM)
7654 {
7655 /* Masked overflow. */
7656 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7657 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7658 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7659 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7660 pFpuCtx->FTW |= RT_BIT(iNewTop);
7661 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7662 iemFpuRotateStackPush(pFpuCtx);
7663 }
7664 else
7665 {
7666 /* Exception pending - don't change TOP or the register stack. */
7667 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7668 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7669 }
7670}
7671
7672
7673/**
7674 * Raises a FPU stack overflow exception on a push.
7675 *
7676 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7677 */
7678DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7679{
7680 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7681 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7682 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7683 iemFpuStackPushOverflowOnly(pFpuCtx);
7684}
7685
7686
7687/**
7688 * Raises a FPU stack overflow exception on a push with a memory operand.
7689 *
7690 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7691 * @param iEffSeg The effective memory operand selector register.
7692 * @param GCPtrEff The effective memory operand offset.
7693 */
7694DECL_NO_INLINE(IEM_STATIC, void)
7695iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7696{
7697 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7698 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7699 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7700 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7701 iemFpuStackPushOverflowOnly(pFpuCtx);
7702}
7703
7704
7705IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7706{
7707 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7708 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7709 if (pFpuCtx->FTW & RT_BIT(iReg))
7710 return VINF_SUCCESS;
7711 return VERR_NOT_FOUND;
7712}
7713
7714
7715IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7716{
7717 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7718 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7719 if (pFpuCtx->FTW & RT_BIT(iReg))
7720 {
7721 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7722 return VINF_SUCCESS;
7723 }
7724 return VERR_NOT_FOUND;
7725}
7726
7727
7728IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7729 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7730{
7731 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7732 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7733 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7734 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7735 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7736 {
7737 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7738 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7739 return VINF_SUCCESS;
7740 }
7741 return VERR_NOT_FOUND;
7742}
7743
7744
7745IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7746{
7747 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7748 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7749 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7750 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7751 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7752 {
7753 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7754 return VINF_SUCCESS;
7755 }
7756 return VERR_NOT_FOUND;
7757}
7758
7759
7760/**
7761 * Updates the FPU exception status after FCW is changed.
7762 *
7763 * @param pFpuCtx The FPU context.
7764 */
7765IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7766{
7767 uint16_t u16Fsw = pFpuCtx->FSW;
7768 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7769 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7770 else
7771 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7772 pFpuCtx->FSW = u16Fsw;
7773}
7774
7775
7776/**
7777 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7778 *
7779 * @returns The full FTW.
7780 * @param pFpuCtx The FPU context.
7781 */
7782IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7783{
7784 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7785 uint16_t u16Ftw = 0;
7786 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7787 for (unsigned iSt = 0; iSt < 8; iSt++)
7788 {
7789 unsigned const iReg = (iSt + iTop) & 7;
7790 if (!(u8Ftw & RT_BIT(iReg)))
7791 u16Ftw |= 3 << (iReg * 2); /* empty */
7792 else
7793 {
7794 uint16_t uTag;
7795 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7796 if (pr80Reg->s.uExponent == 0x7fff)
7797 uTag = 2; /* Exponent is all 1's => Special. */
7798 else if (pr80Reg->s.uExponent == 0x0000)
7799 {
7800 if (pr80Reg->s.u64Mantissa == 0x0000)
7801 uTag = 1; /* All bits are zero => Zero. */
7802 else
7803 uTag = 2; /* Must be special. */
7804 }
7805 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7806 uTag = 0; /* Valid. */
7807 else
7808 uTag = 2; /* Must be special. */
7809
7810 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7811 }
7812 }
7813
7814 return u16Ftw;
7815}
7816
7817
7818/**
7819 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7820 *
7821 * @returns The compressed FTW.
7822 * @param u16FullFtw The full FTW to convert.
7823 */
7824IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7825{
7826 uint8_t u8Ftw = 0;
7827 for (unsigned i = 0; i < 8; i++)
7828 {
7829 if ((u16FullFtw & 3) != 3 /*empty*/)
7830 u8Ftw |= RT_BIT(i);
7831 u16FullFtw >>= 2;
7832 }
7833
7834 return u8Ftw;
7835}
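/* Example (informative): a full FTW of 0xffff (every 2-bit pair is 11b, i.e.
 * empty) compresses to 0x00, while any pair holding 00b/01b/10b
 * (valid/zero/special) sets the corresponding bit in the compressed byte. */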
7836
7837/** @} */
7838
7839
7840/** @name Memory access.
7841 *
7842 * @{
7843 */
7844
7845
7846/**
7847 * Updates the IEMCPU::cbWritten counter if applicable.
7848 *
7849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7850 * @param fAccess The access being accounted for.
7851 * @param cbMem The access size.
7852 */
7853DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7854{
7855 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7856 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7857 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7858}
7859
7860
7861/**
7862 * Checks if the given segment can be written to, raise the appropriate
7863 * exception if not.
7864 *
7865 * @returns VBox strict status code.
7866 *
7867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7868 * @param pHid Pointer to the hidden register.
7869 * @param iSegReg The register number.
7870 * @param pu64BaseAddr Where to return the base address to use for the
7871 * segment. (In 64-bit code it may differ from the
7872 * base in the hidden segment.)
7873 */
7874IEM_STATIC VBOXSTRICTRC
7875iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7876{
7877 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7878 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7879 else
7880 {
7881 if (!pHid->Attr.n.u1Present)
7882 {
7883 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7884 AssertRelease(uSel == 0);
7885 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7886 return iemRaiseGeneralProtectionFault0(pVCpu);
7887 }
7888
7889 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7890 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7891 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7892 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7893 *pu64BaseAddr = pHid->u64Base;
7894 }
7895 return VINF_SUCCESS;
7896}
7897
7898
7899/**
7900 * Checks if the given segment can be read from, raise the appropriate
7901 * exception if not.
7902 *
7903 * @returns VBox strict status code.
7904 *
7905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7906 * @param pHid Pointer to the hidden register.
7907 * @param iSegReg The register number.
7908 * @param pu64BaseAddr Where to return the base address to use for the
7909 * segment. (In 64-bit code it may differ from the
7910 * base in the hidden segment.)
7911 */
7912IEM_STATIC VBOXSTRICTRC
7913iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7914{
7915 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7916 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7917 else
7918 {
7919 if (!pHid->Attr.n.u1Present)
7920 {
7921 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7922 AssertRelease(uSel == 0);
7923 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7924 return iemRaiseGeneralProtectionFault0(pVCpu);
7925 }
7926
7927 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7928 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7929 *pu64BaseAddr = pHid->u64Base;
7930 }
7931 return VINF_SUCCESS;
7932}
7933
7934
7935/**
7936 * Applies the segment limit, base and attributes.
7937 *
7938 * This may raise a \#GP or \#SS.
7939 *
7940 * @returns VBox strict status code.
7941 *
7942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7943 * @param fAccess The kind of access which is being performed.
7944 * @param iSegReg The index of the segment register to apply.
7945 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7946 * TSS, ++).
7947 * @param cbMem The access size.
7948 * @param pGCPtrMem Pointer to the guest memory address to apply
7949 * segmentation to. Input and output parameter.
7950 */
7951IEM_STATIC VBOXSTRICTRC
7952iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7953{
7954 if (iSegReg == UINT8_MAX)
7955 return VINF_SUCCESS;
7956
7957 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7958 switch (pVCpu->iem.s.enmCpuMode)
7959 {
7960 case IEMMODE_16BIT:
7961 case IEMMODE_32BIT:
7962 {
7963 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7964 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7965
7966 if ( pSel->Attr.n.u1Present
7967 && !pSel->Attr.n.u1Unusable)
7968 {
7969 Assert(pSel->Attr.n.u1DescType);
7970 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7971 {
7972 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7973 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7974 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7975
7976 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7977 {
7978 /** @todo CPL check. */
7979 }
7980
7981 /*
7982 * There are two kinds of data selectors, normal and expand down.
7983 */
7984 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7985 {
7986 if ( GCPtrFirst32 > pSel->u32Limit
7987 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7988 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7989 }
7990 else
7991 {
7992 /*
7993 * The upper boundary is defined by the B bit, not the G bit!
7994 */
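                    /* E.g. with u32Limit=0x1000 and the B bit set, offsets 0x1001 thru
                       0xffffffff are valid; an access starting at 0x0800 fails the
                       first check below and raises the selector bounds exception. */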
7995 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7996 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7997 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7998 }
7999 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8000 }
8001 else
8002 {
8003
8004 /*
8005 * A code selector can usually be used to read through it; writing is
8006 * only permitted in real and V8086 mode.
8007 */
8008 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8009 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8010 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8011 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8012 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8013
8014 if ( GCPtrFirst32 > pSel->u32Limit
8015 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8016 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8017
8018 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8019 {
8020 /** @todo CPL check. */
8021 }
8022
8023 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8024 }
8025 }
8026 else
8027 return iemRaiseGeneralProtectionFault0(pVCpu);
8028 return VINF_SUCCESS;
8029 }
8030
8031 case IEMMODE_64BIT:
8032 {
8033 RTGCPTR GCPtrMem = *pGCPtrMem;
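            /* Only FS and GS have their segment base applied in 64-bit mode; the
               CS, DS, ES and SS bases are treated as zero. */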
8034 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8035 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8036
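            /* Raise #GP(0) if the address range isn't canonical. */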
8037 Assert(cbMem >= 1);
8038 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8039 return VINF_SUCCESS;
8040 return iemRaiseGeneralProtectionFault0(pVCpu);
8041 }
8042
8043 default:
8044 AssertFailedReturn(VERR_IEM_IPE_7);
8045 }
8046}
8047
8048
8049/**
8050 * Translates a virtual address to a physical address and checks if we
8051 * can access the page as specified.
8052 *
8053 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8054 * @param GCPtrMem The virtual address.
8055 * @param fAccess The intended access.
8056 * @param pGCPhysMem Where to return the physical address.
8057 */
8058IEM_STATIC VBOXSTRICTRC
8059iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8060{
8061 /** @todo Need a different PGM interface here. We're currently using
8062 * generic / REM interfaces. this won't cut it for R0 & RC. */
8063 RTGCPHYS GCPhys;
8064 uint64_t fFlags;
8065 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8066 if (RT_FAILURE(rc))
8067 {
8068 /** @todo Check unassigned memory in unpaged mode. */
8069 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8070 *pGCPhysMem = NIL_RTGCPHYS;
8071 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8072 }
8073
8074 /* If the page is writable and does not have the no-exec bit set, all
8075 access is allowed. Otherwise we'll have to check more carefully... */
8076 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8077 {
8078 /* Write to read only memory? */
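            /* Note that supervisor writes (CPL < 3 or system accesses) are only
               refused when CR0.WP is set. */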
8079 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8080 && !(fFlags & X86_PTE_RW)
8081 && ( (pVCpu->iem.s.uCpl == 3
8082 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8083 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8084 {
8085 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8086 *pGCPhysMem = NIL_RTGCPHYS;
8087 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8088 }
8089
8090 /* Kernel memory accessed by userland? */
8091 if ( !(fFlags & X86_PTE_US)
8092 && pVCpu->iem.s.uCpl == 3
8093 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8094 {
8095 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8096 *pGCPhysMem = NIL_RTGCPHYS;
8097 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8098 }
8099
8100 /* Executing non-executable memory? */
8101 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8102 && (fFlags & X86_PTE_PAE_NX)
8103 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8104 {
8105 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8106 *pGCPhysMem = NIL_RTGCPHYS;
8107 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8108 VERR_ACCESS_DENIED);
8109 }
8110 }
8111
8112 /*
8113 * Set the dirty / access flags.
8114 * ASSUMES this is set when the address is translated rather than on commit...
8115 */
8116 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8117 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8118 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8119 {
8120 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8121 AssertRC(rc2);
8122 }
8123
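    /* Combine the translated page address with the offset into the page. */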
8124 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8125 *pGCPhysMem = GCPhys;
8126 return VINF_SUCCESS;
8127}
8128
8129
8130
8131/**
8132 * Maps a physical page.
8133 *
8134 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8135 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8136 * @param GCPhysMem The physical address.
8137 * @param fAccess The intended access.
8138 * @param ppvMem Where to return the mapping address.
8139 * @param pLock The PGM lock.
8140 */
8141IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8142{
8143#ifdef IEM_VERIFICATION_MODE_FULL
8144 /* Force the alternative path so we can ignore writes. */
8145 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8146 {
8147 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8148 {
8149 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8150 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8151 if (RT_FAILURE(rc2))
8152 pVCpu->iem.s.fProblematicMemory = true;
8153 }
8154 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8155 }
8156#endif
8157#ifdef IEM_LOG_MEMORY_WRITES
8158 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8159 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8160#endif
8161#ifdef IEM_VERIFICATION_MODE_MINIMAL
8162 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8163#endif
8164
8165 /** @todo This API may require some improving later. A private deal with PGM
8166 * regarding locking and unlocking needs to be struck. A couple of TLBs
8167 * living in PGM, but with publicly accessible inlined access methods
8168 * could perhaps be an even better solution. */
8169 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8170 GCPhysMem,
8171 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8172 pVCpu->iem.s.fBypassHandlers,
8173 ppvMem,
8174 pLock);
8175 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8176 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8177
8178#ifdef IEM_VERIFICATION_MODE_FULL
8179 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8180 pVCpu->iem.s.fProblematicMemory = true;
8181#endif
8182 return rc;
8183}
8184
8185
8186/**
8187 * Unmap a page previously mapped by iemMemPageMap.
8188 *
8189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8190 * @param GCPhysMem The physical address.
8191 * @param fAccess The intended access.
8192 * @param pvMem What iemMemPageMap returned.
8193 * @param pLock The PGM lock.
8194 */
8195DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8196{
8197 NOREF(pVCpu);
8198 NOREF(GCPhysMem);
8199 NOREF(fAccess);
8200 NOREF(pvMem);
8201 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8202}
8203
8204
8205/**
8206 * Looks up a memory mapping entry.
8207 *
8208 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8209 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8210 * @param pvMem The memory address.
8211 * @param fAccess The access type and origin to match.
8212 */
8213DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8214{
8215 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8216 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8217 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8218 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8219 return 0;
8220 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8221 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8222 return 1;
8223 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8224 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8225 return 2;
8226 return VERR_NOT_FOUND;
8227}
8228
8229
8230/**
8231 * Finds a free memmap entry when using iNextMapping doesn't work.
8232 *
8233 * @returns Memory mapping index, 1024 on failure.
8234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8235 */
8236IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8237{
8238 /*
8239 * The easy case.
8240 */
8241 if (pVCpu->iem.s.cActiveMappings == 0)
8242 {
8243 pVCpu->iem.s.iNextMapping = 1;
8244 return 0;
8245 }
8246
8247 /* There should be enough mappings for all instructions. */
8248 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8249
8250 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8251 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8252 return i;
8253
8254 AssertFailedReturn(1024);
8255}
8256
8257
8258/**
8259 * Commits a bounce buffer that needs writing back and unmaps it.
8260 *
8261 * @returns Strict VBox status code.
8262 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8263 * @param iMemMap The index of the buffer to commit.
8264 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8265 * Always false in ring-3, obviously.
8266 */
8267IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8268{
8269 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8270 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8271#ifdef IN_RING3
8272 Assert(!fPostponeFail);
8273 RT_NOREF_PV(fPostponeFail);
8274#endif
8275
8276 /*
8277 * Do the writing.
8278 */
8279#ifndef IEM_VERIFICATION_MODE_MINIMAL
8280 PVM pVM = pVCpu->CTX_SUFF(pVM);
8281 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8282 && !IEM_VERIFICATION_ENABLED(pVCpu))
8283 {
8284 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8285 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8286 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8287 if (!pVCpu->iem.s.fBypassHandlers)
8288 {
8289 /*
8290 * Carefully and efficiently dealing with access handler return
8291 * codes make this a little bloated.
8292 */
8293 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8294 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8295 pbBuf,
8296 cbFirst,
8297 PGMACCESSORIGIN_IEM);
8298 if (rcStrict == VINF_SUCCESS)
8299 {
8300 if (cbSecond)
8301 {
8302 rcStrict = PGMPhysWrite(pVM,
8303 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8304 pbBuf + cbFirst,
8305 cbSecond,
8306 PGMACCESSORIGIN_IEM);
8307 if (rcStrict == VINF_SUCCESS)
8308 { /* nothing */ }
8309 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8310 {
8311 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8312 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8313 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8314 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8315 }
8316# ifndef IN_RING3
8317 else if (fPostponeFail)
8318 {
8319 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8320 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8321 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8322 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8323 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8324 return iemSetPassUpStatus(pVCpu, rcStrict);
8325 }
8326# endif
8327 else
8328 {
8329 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8330 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8331 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8332 return rcStrict;
8333 }
8334 }
8335 }
8336 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8337 {
8338 if (!cbSecond)
8339 {
8340 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8341 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8342 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8343 }
8344 else
8345 {
8346 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8347 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8348 pbBuf + cbFirst,
8349 cbSecond,
8350 PGMACCESSORIGIN_IEM);
8351 if (rcStrict2 == VINF_SUCCESS)
8352 {
8353 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8354 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8355 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8356 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8357 }
8358 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8359 {
8360 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8361 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8362 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8363 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8364 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8365 }
8366# ifndef IN_RING3
8367 else if (fPostponeFail)
8368 {
8369 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8370 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8371 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8372 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8373 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8374 return iemSetPassUpStatus(pVCpu, rcStrict);
8375 }
8376# endif
8377 else
8378 {
8379 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8380 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8381 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8382 return rcStrict2;
8383 }
8384 }
8385 }
8386# ifndef IN_RING3
8387 else if (fPostponeFail)
8388 {
8389 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8390 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8391 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8392 if (!cbSecond)
8393 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8394 else
8395 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8396 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8397 return iemSetPassUpStatus(pVCpu, rcStrict);
8398 }
8399# endif
8400 else
8401 {
8402 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8403 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8404 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8405 return rcStrict;
8406 }
8407 }
8408 else
8409 {
8410 /*
8411 * No access handlers, much simpler.
8412 */
8413 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8414 if (RT_SUCCESS(rc))
8415 {
8416 if (cbSecond)
8417 {
8418 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8419 if (RT_SUCCESS(rc))
8420 { /* likely */ }
8421 else
8422 {
8423 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8424 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8425 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8426 return rc;
8427 }
8428 }
8429 }
8430 else
8431 {
8432 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8433 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8434 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8435 return rc;
8436 }
8437 }
8438 }
8439#endif
8440
8441#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8442 /*
8443 * Record the write(s).
8444 */
8445 if (!pVCpu->iem.s.fNoRem)
8446 {
8447 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8448 if (pEvtRec)
8449 {
8450 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8451 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8452 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8453 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8454 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8455 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8456 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8457 }
8458 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8459 {
8460 pEvtRec = iemVerifyAllocRecord(pVCpu);
8461 if (pEvtRec)
8462 {
8463 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8464 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8465 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8466 memcpy(pEvtRec->u.RamWrite.ab,
8467 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8468 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8469 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8470 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8471 }
8472 }
8473 }
8474#endif
8475#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8476 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8477 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8478 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8479 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8480 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8481 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8482
8483 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8484 g_cbIemWrote = cbWrote;
8485 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8486#endif
8487
8488 /*
8489 * Free the mapping entry.
8490 */
8491 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8492 Assert(pVCpu->iem.s.cActiveMappings != 0);
8493 pVCpu->iem.s.cActiveMappings--;
8494 return VINF_SUCCESS;
8495}
8496
8497
8498/**
8499 * iemMemMap worker that deals with a request crossing pages.
8500 */
8501IEM_STATIC VBOXSTRICTRC
8502iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8503{
8504 /*
8505 * Do the address translations.
8506 */
8507 RTGCPHYS GCPhysFirst;
8508 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8509 if (rcStrict != VINF_SUCCESS)
8510 return rcStrict;
8511
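    /* Translate the start of the second page, derived from the last byte of the access. */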
8512 RTGCPHYS GCPhysSecond;
8513 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8514 fAccess, &GCPhysSecond);
8515 if (rcStrict != VINF_SUCCESS)
8516 return rcStrict;
8517 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8518
8519 PVM pVM = pVCpu->CTX_SUFF(pVM);
8520#ifdef IEM_VERIFICATION_MODE_FULL
8521 /*
8522 * Detect problematic memory when verifying so we can select
8523 * the right execution engine. (TLB: Redo this.)
8524 */
8525 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8526 {
8527 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8528 if (RT_SUCCESS(rc2))
8529 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8530 if (RT_FAILURE(rc2))
8531 pVCpu->iem.s.fProblematicMemory = true;
8532 }
8533#endif
8534
8535
8536 /*
8537 * Read in the current memory content if it's a read, execute or partial
8538 * write access.
8539 */
8540 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8541 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8542 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8543
8544 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8545 {
8546 if (!pVCpu->iem.s.fBypassHandlers)
8547 {
8548 /*
8549 * Must carefully deal with access handler status codes here,
8550 * makes the code a bit bloated.
8551 */
8552 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8553 if (rcStrict == VINF_SUCCESS)
8554 {
8555 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8556 if (rcStrict == VINF_SUCCESS)
8557 { /*likely */ }
8558 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8559 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8560 else
8561 {
8562 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8563 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8564 return rcStrict;
8565 }
8566 }
8567 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8568 {
8569 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8570 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8571 {
8572 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8573 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8574 }
8575 else
8576 {
8577 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8578 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8579 return rcStrict2;
8580 }
8581 }
8582 else
8583 {
8584 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8585 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8586 return rcStrict;
8587 }
8588 }
8589 else
8590 {
8591 /*
8592 * No informational status codes here, much more straightforward.
8593 */
8594 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8595 if (RT_SUCCESS(rc))
8596 {
8597 Assert(rc == VINF_SUCCESS);
8598 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8599 if (RT_SUCCESS(rc))
8600 Assert(rc == VINF_SUCCESS);
8601 else
8602 {
8603 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8604 return rc;
8605 }
8606 }
8607 else
8608 {
8609 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8610 return rc;
8611 }
8612 }
8613
8614#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8615 if ( !pVCpu->iem.s.fNoRem
8616 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8617 {
8618 /*
8619 * Record the reads.
8620 */
8621 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8622 if (pEvtRec)
8623 {
8624 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8625 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8626 pEvtRec->u.RamRead.cb = cbFirstPage;
8627 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8628 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8629 }
8630 pEvtRec = iemVerifyAllocRecord(pVCpu);
8631 if (pEvtRec)
8632 {
8633 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8634 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8635 pEvtRec->u.RamRead.cb = cbSecondPage;
8636 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8637 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8638 }
8639 }
8640#endif
8641 }
8642#ifdef VBOX_STRICT
8643 else
8644 memset(pbBuf, 0xcc, cbMem);
8645 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8646 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8647#endif
8648
8649 /*
8650 * Commit the bounce buffer entry.
8651 */
8652 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8653 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8654 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8655 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8656 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8657 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8658 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8659 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8660 pVCpu->iem.s.cActiveMappings++;
8661
8662 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8663 *ppvMem = pbBuf;
8664 return VINF_SUCCESS;
8665}
8666
8667
8668/**
8669 * iemMemMap worker that deals with iemMemPageMap failures.
8670 */
8671IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8672 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8673{
8674 /*
8675 * Filter out conditions we can handle and the ones which shouldn't happen.
8676 */
8677 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8678 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8679 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8680 {
8681 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8682 return rcMap;
8683 }
8684 pVCpu->iem.s.cPotentialExits++;
8685
8686 /*
8687 * Read in the current memory content if it's a read, execute or partial
8688 * write access.
8689 */
8690 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8691 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8692 {
8693 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8694 memset(pbBuf, 0xff, cbMem);
8695 else
8696 {
8697 int rc;
8698 if (!pVCpu->iem.s.fBypassHandlers)
8699 {
8700 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8701 if (rcStrict == VINF_SUCCESS)
8702 { /* nothing */ }
8703 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8704 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8705 else
8706 {
8707 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8708 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8709 return rcStrict;
8710 }
8711 }
8712 else
8713 {
8714 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8715 if (RT_SUCCESS(rc))
8716 { /* likely */ }
8717 else
8718 {
8719 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
8720 GCPhysFirst, rc));
8721 return rc;
8722 }
8723 }
8724 }
8725
8726#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8727 if ( !pVCpu->iem.s.fNoRem
8728 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8729 {
8730 /*
8731 * Record the read.
8732 */
8733 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8734 if (pEvtRec)
8735 {
8736 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8737 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8738 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8739 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8740 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8741 }
8742 }
8743#endif
8744 }
8745#ifdef VBOX_STRICT
8746 else
8747 memset(pbBuf, 0xcc, cbMem);
8748#endif
8749#ifdef VBOX_STRICT
8750 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8751 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8752#endif
8753
8754 /*
8755 * Commit the bounce buffer entry.
8756 */
8757 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8758 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8759 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8760 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8761 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8762 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8763 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8764 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8765 pVCpu->iem.s.cActiveMappings++;
8766
8767 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8768 *ppvMem = pbBuf;
8769 return VINF_SUCCESS;
8770}
8771
8772
8773
8774/**
8775 * Maps the specified guest memory for the given kind of access.
8776 *
8777 * This may be using bounce buffering of the memory if it's crossing a page
8778 * boundary or if there is an access handler installed for any of it. Because
8779 * of lock prefix guarantees, we're in for some extra clutter when this
8780 * happens.
8781 *
8782 * This may raise a \#GP, \#SS, \#PF or \#AC.
8783 *
8784 * @returns VBox strict status code.
8785 *
8786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8787 * @param ppvMem Where to return the pointer to the mapped
8788 * memory.
8789 * @param cbMem The number of bytes to map. This is usually 1,
8790 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8791 * string operations it can be up to a page.
8792 * @param iSegReg The index of the segment register to use for
8793 * this access. The base and limits are checked.
8794 * Use UINT8_MAX to indicate that no segmentation
8795 * is required (for IDT, GDT and LDT accesses).
8796 * @param GCPtrMem The address of the guest memory.
8797 * @param fAccess How the memory is being accessed. The
8798 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8799 * how to map the memory, while the
8800 * IEM_ACCESS_WHAT_XXX bit is used when raising
8801 * exceptions.
8802 */
8803IEM_STATIC VBOXSTRICTRC
8804iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8805{
8806 /*
8807 * Check the input and figure out which mapping entry to use.
8808 */
8809 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8810 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8811 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8812
8813 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8814 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8815 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8816 {
8817 iMemMap = iemMemMapFindFree(pVCpu);
8818 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8819 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8820 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8821 pVCpu->iem.s.aMemMappings[2].fAccess),
8822 VERR_IEM_IPE_9);
8823 }
8824
8825 /*
8826 * Map the memory, checking that we can actually access it. If something
8827 * slightly complicated happens, fall back on bounce buffering.
8828 */
8829 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8830 if (rcStrict != VINF_SUCCESS)
8831 return rcStrict;
8832
8833 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8834 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8835
8836 RTGCPHYS GCPhysFirst;
8837 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8838 if (rcStrict != VINF_SUCCESS)
8839 return rcStrict;
8840
8841 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8842 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8843 if (fAccess & IEM_ACCESS_TYPE_READ)
8844 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8845
8846 void *pvMem;
8847 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8848 if (rcStrict != VINF_SUCCESS)
8849 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8850
8851 /*
8852 * Fill in the mapping table entry.
8853 */
8854 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8855 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8856 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8857 pVCpu->iem.s.cActiveMappings++;
8858
8859 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8860 *ppvMem = pvMem;
8861 return VINF_SUCCESS;
8862}
8863
8864
8865/**
8866 * Commits the guest memory if bounce buffered and unmaps it.
8867 *
8868 * @returns Strict VBox status code.
8869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8870 * @param pvMem The mapping.
8871 * @param fAccess The kind of access.
8872 */
8873IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8874{
8875 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8876 AssertReturn(iMemMap >= 0, iMemMap);
8877
8878 /* If it's bounce buffered, we may need to write back the buffer. */
8879 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8880 {
8881 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8882 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8883 }
8884 /* Otherwise unlock it. */
8885 else
8886 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8887
8888 /* Free the entry. */
8889 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8890 Assert(pVCpu->iem.s.cActiveMappings != 0);
8891 pVCpu->iem.s.cActiveMappings--;
8892 return VINF_SUCCESS;
8893}
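/*
 * Typical use of the iemMemMap / iemMemCommitAndUnmap pair above, sketched here
 * for a dword store; u32Value and the other inputs stand in for caller supplied
 * data (cf. the iemMemFetchDataUxx helpers further down for the read direction):
 *
 *      uint32_t *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                        iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *      }
 */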
8894
8895#ifdef IEM_WITH_SETJMP
8896
8897/**
8898 * Maps the specified guest memory for the given kind of access, longjmp on
8899 * error.
8900 *
8901 * This may be using bounce buffering of the memory if it's crossing a page
8902 * boundary or if there is an access handler installed for any of it. Because
8903 * of lock prefix guarantees, we're in for some extra clutter when this
8904 * happens.
8905 *
8906 * This may raise a \#GP, \#SS, \#PF or \#AC.
8907 *
8908 * @returns Pointer to the mapped memory.
8909 *
8910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8911 * @param cbMem The number of bytes to map. This is usually 1,
8912 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8913 * string operations it can be up to a page.
8914 * @param iSegReg The index of the segment register to use for
8915 * this access. The base and limits are checked.
8916 * Use UINT8_MAX to indicate that no segmentation
8917 * is required (for IDT, GDT and LDT accesses).
8918 * @param GCPtrMem The address of the guest memory.
8919 * @param fAccess How the memory is being accessed. The
8920 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8921 * how to map the memory, while the
8922 * IEM_ACCESS_WHAT_XXX bit is used when raising
8923 * exceptions.
8924 */
8925IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8926{
8927 /*
8928 * Check the input and figure out which mapping entry to use.
8929 */
8930 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8931 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8932 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8933
8934 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8935 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8936 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8937 {
8938 iMemMap = iemMemMapFindFree(pVCpu);
8939 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8940 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8941 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8942 pVCpu->iem.s.aMemMappings[2].fAccess),
8943 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8944 }
8945
8946 /*
8947 * Map the memory, checking that we can actually access it. If something
8948 * slightly complicated happens, fall back on bounce buffering.
8949 */
8950 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8951 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8952 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8953
8954 /* Crossing a page boundary? */
8955 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8956 { /* No (likely). */ }
8957 else
8958 {
8959 void *pvMem;
8960 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8961 if (rcStrict == VINF_SUCCESS)
8962 return pvMem;
8963 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8964 }
8965
8966 RTGCPHYS GCPhysFirst;
8967 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8968 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8969 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8970
8971 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8972 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8973 if (fAccess & IEM_ACCESS_TYPE_READ)
8974 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8975
8976 void *pvMem;
8977 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8978 if (rcStrict == VINF_SUCCESS)
8979 { /* likely */ }
8980 else
8981 {
8982 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8983 if (rcStrict == VINF_SUCCESS)
8984 return pvMem;
8985 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8986 }
8987
8988 /*
8989 * Fill in the mapping table entry.
8990 */
8991 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8992 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8993 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8994 pVCpu->iem.s.cActiveMappings++;
8995
8996 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8997 return pvMem;
8998}
8999
9000
9001/**
9002 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9003 *
9004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9005 * @param pvMem The mapping.
9006 * @param fAccess The kind of access.
9007 */
9008IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9009{
9010 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9011 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9012
9013 /* If it's bounce buffered, we may need to write back the buffer. */
9014 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9015 {
9016 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9017 {
9018 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9019 if (rcStrict == VINF_SUCCESS)
9020 return;
9021 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9022 }
9023 }
9024 /* Otherwise unlock it. */
9025 else
9026 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9027
9028 /* Free the entry. */
9029 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9030 Assert(pVCpu->iem.s.cActiveMappings != 0);
9031 pVCpu->iem.s.cActiveMappings--;
9032}
9033
9034#endif
9035
9036#ifndef IN_RING3
9037/**
9038 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9039 * buffer part runs into trouble, the write is postponed to ring-3 (by setting VMCPU_FF_IEM and the pending-write access flags).
9040 *
9041 * Allows the instruction to be completed and retired, while the IEM user will
9042 * return to ring-3 immediately afterwards and do the postponed writes there.
9043 *
9044 * @returns VBox status code (no strict statuses). Caller must check
9045 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9047 * @param pvMem The mapping.
9048 * @param fAccess The kind of access.
9049 */
9050IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9051{
9052 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9053 AssertReturn(iMemMap >= 0, iMemMap);
9054
9055 /* If it's bounce buffered, we may need to write back the buffer. */
9056 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9057 {
9058 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9059 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9060 }
9061 /* Otherwise unlock it. */
9062 else
9063 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9064
9065 /* Free the entry. */
9066 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9067 Assert(pVCpu->iem.s.cActiveMappings != 0);
9068 pVCpu->iem.s.cActiveMappings--;
9069 return VINF_SUCCESS;
9070}
9071#endif
9072
9073
9074/**
9075 * Rolls back mappings, releasing page locks and such.
9076 *
9077 * The caller shall only call this after checking cActiveMappings.
9078 *
9079 * @returns Strict VBox status code to pass up.
9080 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9081 */
9082IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9083{
9084 Assert(pVCpu->iem.s.cActiveMappings > 0);
9085
9086 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9087 while (iMemMap-- > 0)
9088 {
9089 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9090 if (fAccess != IEM_ACCESS_INVALID)
9091 {
9092 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9093 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9094 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9095 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9096 Assert(pVCpu->iem.s.cActiveMappings > 0);
9097 pVCpu->iem.s.cActiveMappings--;
9098 }
9099 }
9100}
9101
9102
9103/**
9104 * Fetches a data byte.
9105 *
9106 * @returns Strict VBox status code.
9107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9108 * @param pu8Dst Where to return the byte.
9109 * @param iSegReg The index of the segment register to use for
9110 * this access. The base and limits are checked.
9111 * @param GCPtrMem The address of the guest memory.
9112 */
9113IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9114{
9115 /* The lazy approach for now... */
9116 uint8_t const *pu8Src;
9117 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9118 if (rc == VINF_SUCCESS)
9119 {
9120 *pu8Dst = *pu8Src;
9121 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9122 }
9123 return rc;
9124}
9125
9126
9127#ifdef IEM_WITH_SETJMP
9128/**
9129 * Fetches a data byte, longjmp on error.
9130 *
9131 * @returns The byte.
9132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9133 * @param iSegReg The index of the segment register to use for
9134 * this access. The base and limits are checked.
9135 * @param GCPtrMem The address of the guest memory.
9136 */
9137DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9138{
9139 /* The lazy approach for now... */
9140 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9141 uint8_t const bRet = *pu8Src;
9142 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9143 return bRet;
9144}
9145#endif /* IEM_WITH_SETJMP */
9146
9147
9148/**
9149 * Fetches a data word.
9150 *
9151 * @returns Strict VBox status code.
9152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9153 * @param pu16Dst Where to return the word.
9154 * @param iSegReg The index of the segment register to use for
9155 * this access. The base and limits are checked.
9156 * @param GCPtrMem The address of the guest memory.
9157 */
9158IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9159{
9160 /* The lazy approach for now... */
9161 uint16_t const *pu16Src;
9162 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9163 if (rc == VINF_SUCCESS)
9164 {
9165 *pu16Dst = *pu16Src;
9166 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9167 }
9168 return rc;
9169}
9170
9171
9172#ifdef IEM_WITH_SETJMP
9173/**
9174 * Fetches a data word, longjmp on error.
9175 *
9176 * @returns The word.
9177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9178 * @param iSegReg The index of the segment register to use for
9179 * this access. The base and limits are checked.
9180 * @param GCPtrMem The address of the guest memory.
9181 */
9182DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9183{
9184 /* The lazy approach for now... */
9185 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9186 uint16_t const u16Ret = *pu16Src;
9187 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9188 return u16Ret;
9189}
9190#endif
9191
9192
9193/**
9194 * Fetches a data dword.
9195 *
9196 * @returns Strict VBox status code.
9197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9198 * @param pu32Dst Where to return the dword.
9199 * @param iSegReg The index of the segment register to use for
9200 * this access. The base and limits are checked.
9201 * @param GCPtrMem The address of the guest memory.
9202 */
9203IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9204{
9205 /* The lazy approach for now... */
9206 uint32_t const *pu32Src;
9207 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9208 if (rc == VINF_SUCCESS)
9209 {
9210 *pu32Dst = *pu32Src;
9211 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9212 }
9213 return rc;
9214}
9215
9216
9217#ifdef IEM_WITH_SETJMP
9218
9219IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9220{
9221 Assert(cbMem >= 1);
9222 Assert(iSegReg < X86_SREG_COUNT);
9223
9224 /*
9225 * 64-bit mode is simpler.
9226 */
9227 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9228 {
9229 if (iSegReg >= X86_SREG_FS)
9230 {
9231 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9232 GCPtrMem += pSel->u64Base;
9233 }
9234
9235 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9236 return GCPtrMem;
9237 }
9238 /*
9239 * 16-bit and 32-bit segmentation.
9240 */
9241 else
9242 {
9243 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9244 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9245 == X86DESCATTR_P /* data, expand up */
9246 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9247 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9248 {
9249 /* expand up */
9250 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9251 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9252 && GCPtrLast32 > (uint32_t)GCPtrMem))
9253 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9254 }
9255 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9256 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9257 {
9258 /* expand down */
9259 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9260 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9261 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9262 && GCPtrLast32 > (uint32_t)GCPtrMem))
9263 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9264 }
9265 else
9266 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9267 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9268 }
9269 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9270}
9271
9272
9273IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9274{
9275 Assert(cbMem >= 1);
9276 Assert(iSegReg < X86_SREG_COUNT);
9277
9278 /*
9279 * 64-bit mode is simpler.
9280 */
9281 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9282 {
9283 if (iSegReg >= X86_SREG_FS)
9284 {
9285 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9286 GCPtrMem += pSel->u64Base;
9287 }
9288
9289 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9290 return GCPtrMem;
9291 }
9292 /*
9293 * 16-bit and 32-bit segmentation.
9294 */
9295 else
9296 {
9297 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9298 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9299 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9300 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9301 {
9302 /* expand up */
9303 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9304 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9305 && GCPtrLast32 > (uint32_t)GCPtrMem))
9306 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9307 }
9308 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9309 {
9310 /* expand down */
9311 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9312 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9313 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9314 && GCPtrLast32 > (uint32_t)GCPtrMem))
9315 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9316 }
9317 else
9318 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9319 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9320 }
9321 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9322}
9323
9324
9325/**
9326 * Fetches a data dword, longjmp on error, fallback/safe version.
9327 *
9328 * @returns The dword.
9329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9330 * @param iSegReg The index of the segment register to use for
9331 * this access. The base and limits are checked.
9332 * @param GCPtrMem The address of the guest memory.
9333 */
9334IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9335{
9336 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9337 uint32_t const u32Ret = *pu32Src;
9338 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9339 return u32Ret;
9340}
9341
9342
9343/**
9344 * Fetches a data dword, longjmp on error.
9345 *
9346 * @returns The dword.
9347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9348 * @param iSegReg The index of the segment register to use for
9349 * this access. The base and limits are checked.
9350 * @param GCPtrMem The address of the guest memory.
9351 */
9352DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9353{
9354# ifdef IEM_WITH_DATA_TLB
9355 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9356 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9357 {
9358 /// @todo more later.
9359 }
9360
9361 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9362# else
9363 /* The lazy approach. */
9364 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9365 uint32_t const u32Ret = *pu32Src;
9366 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9367 return u32Ret;
9368# endif
9369}
9370#endif
9371
9372
9373#ifdef SOME_UNUSED_FUNCTION
9374/**
9375 * Fetches a data dword and sign extends it to a qword.
9376 *
9377 * @returns Strict VBox status code.
9378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9379 * @param pu64Dst Where to return the sign extended value.
9380 * @param iSegReg The index of the segment register to use for
9381 * this access. The base and limits are checked.
9382 * @param GCPtrMem The address of the guest memory.
9383 */
9384IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9385{
9386 /* The lazy approach for now... */
9387 int32_t const *pi32Src;
9388 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9389 if (rc == VINF_SUCCESS)
9390 {
9391 *pu64Dst = *pi32Src;
9392 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9393 }
9394#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9395 else
9396 *pu64Dst = 0;
9397#endif
9398 return rc;
9399}
9400#endif
9401
9402
9403/**
9404 * Fetches a data qword.
9405 *
9406 * @returns Strict VBox status code.
9407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9408 * @param pu64Dst Where to return the qword.
9409 * @param iSegReg The index of the segment register to use for
9410 * this access. The base and limits are checked.
9411 * @param GCPtrMem The address of the guest memory.
9412 */
9413IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9414{
9415 /* The lazy approach for now... */
9416 uint64_t const *pu64Src;
9417 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9418 if (rc == VINF_SUCCESS)
9419 {
9420 *pu64Dst = *pu64Src;
9421 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9422 }
9423 return rc;
9424}
9425
9426
9427#ifdef IEM_WITH_SETJMP
9428/**
9429 * Fetches a data qword, longjmp on error.
9430 *
9431 * @returns The qword.
9432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9433 * @param iSegReg The index of the segment register to use for
9434 * this access. The base and limits are checked.
9435 * @param GCPtrMem The address of the guest memory.
9436 */
9437DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9438{
9439 /* The lazy approach for now... */
9440 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9441 uint64_t const u64Ret = *pu64Src;
9442 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9443 return u64Ret;
9444}
9445#endif
9446
9447
9448/**
9449 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9450 *
9451 * @returns Strict VBox status code.
9452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9453 * @param pu64Dst Where to return the qword.
9454 * @param iSegReg The index of the segment register to use for
9455 * this access. The base and limits are checked.
9456 * @param GCPtrMem The address of the guest memory.
9457 */
9458IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9459{
9460 /* The lazy approach for now... */
9461 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9462 if (RT_UNLIKELY(GCPtrMem & 15))
9463 return iemRaiseGeneralProtectionFault0(pVCpu);
9464
9465 uint64_t const *pu64Src;
9466 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9467 if (rc == VINF_SUCCESS)
9468 {
9469 *pu64Dst = *pu64Src;
9470 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9471 }
9472 return rc;
9473}
9474
9475
9476#ifdef IEM_WITH_SETJMP
9477/**
9478 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9479 *
9480 * @returns The qword.
9481 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9482 * @param iSegReg The index of the segment register to use for
9483 * this access. The base and limits are checked.
9484 * @param GCPtrMem The address of the guest memory.
9485 */
9486DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9487{
9488 /* The lazy approach for now... */
9489 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9490 if (RT_LIKELY(!(GCPtrMem & 15)))
9491 {
9492 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9493 uint64_t const u64Ret = *pu64Src;
9494 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9495 return u64Ret;
9496 }
9497
9498 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9499 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9500}
9501#endif
9502
9503
9504/**
9505 * Fetches a data tword.
9506 *
9507 * @returns Strict VBox status code.
9508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9509 * @param pr80Dst Where to return the tword.
9510 * @param iSegReg The index of the segment register to use for
9511 * this access. The base and limits are checked.
9512 * @param GCPtrMem The address of the guest memory.
9513 */
9514IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9515{
9516 /* The lazy approach for now... */
9517 PCRTFLOAT80U pr80Src;
9518 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9519 if (rc == VINF_SUCCESS)
9520 {
9521 *pr80Dst = *pr80Src;
9522 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9523 }
9524 return rc;
9525}
9526
9527
9528#ifdef IEM_WITH_SETJMP
9529/**
9530 * Fetches a data tword, longjmp on error.
9531 *
9532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9533 * @param pr80Dst Where to return the tword.
9534 * @param iSegReg The index of the segment register to use for
9535 * this access. The base and limits are checked.
9536 * @param GCPtrMem The address of the guest memory.
9537 */
9538DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9539{
9540 /* The lazy approach for now... */
9541 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9542 *pr80Dst = *pr80Src;
9543 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9544}
9545#endif
9546
9547
9548/**
9549 * Fetches a data dqword (double qword), generally SSE related.
9550 *
9551 * @returns Strict VBox status code.
9552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9553 * @param pu128Dst Where to return the dqword.
9554 * @param iSegReg The index of the segment register to use for
9555 * this access. The base and limits are checked.
9556 * @param GCPtrMem The address of the guest memory.
9557 */
9558IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9559{
9560 /* The lazy approach for now... */
9561 PCRTUINT128U pu128Src;
9562 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9563 if (rc == VINF_SUCCESS)
9564 {
9565 pu128Dst->au64[0] = pu128Src->au64[0];
9566 pu128Dst->au64[1] = pu128Src->au64[1];
9567 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9568 }
9569 return rc;
9570}
9571
9572
9573#ifdef IEM_WITH_SETJMP
9574/**
9575 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9576 *
9577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9578 * @param pu128Dst Where to return the dqword.
9579 * @param iSegReg The index of the segment register to use for
9580 * this access. The base and limits are checked.
9581 * @param GCPtrMem The address of the guest memory.
9582 */
9583IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9584{
9585 /* The lazy approach for now... */
9586 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9587 pu128Dst->au64[0] = pu128Src->au64[0];
9588 pu128Dst->au64[1] = pu128Src->au64[1];
9589 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9590}
9591#endif
9592
9593
9594/**
9595 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9596 * related.
9597 *
9598 * Raises \#GP(0) if not aligned.
9599 *
9600 * @returns Strict VBox status code.
9601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9602 * @param pu128Dst Where to return the dqword.
9603 * @param iSegReg The index of the segment register to use for
9604 * this access. The base and limits are checked.
9605 * @param GCPtrMem The address of the guest memory.
9606 */
9607IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9608{
9609 /* The lazy approach for now... */
9610 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9611 if ( (GCPtrMem & 15)
9612 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9613 return iemRaiseGeneralProtectionFault0(pVCpu);
9614
9615 PCRTUINT128U pu128Src;
9616 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9617 if (rc == VINF_SUCCESS)
9618 {
9619 pu128Dst->au64[0] = pu128Src->au64[0];
9620 pu128Dst->au64[1] = pu128Src->au64[1];
9621 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9622 }
9623 return rc;
9624}
9625
9626
9627#ifdef IEM_WITH_SETJMP
9628/**
9629 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9630 * related, longjmp on error.
9631 *
9632 * Raises \#GP(0) if not aligned.
9633 *
9634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9635 * @param pu128Dst Where to return the dqword.
9636 * @param iSegReg The index of the segment register to use for
9637 * this access. The base and limits are checked.
9638 * @param GCPtrMem The address of the guest memory.
9639 */
9640DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9641{
9642 /* The lazy approach for now... */
9643 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9644 if ( (GCPtrMem & 15) == 0
9645 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9646 {
9647 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9648 pu128Dst->au64[0] = pu128Src->au64[0];
9649 pu128Dst->au64[1] = pu128Src->au64[1];
9650 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9651 return;
9652 }
9653
9654 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9655 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9656}
9657#endif
9658
9659
9660
9661/**
9662 * Fetches a descriptor register (lgdt, lidt).
9663 *
9664 * @returns Strict VBox status code.
9665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9666 * @param pcbLimit Where to return the limit.
9667 * @param pGCPtrBase Where to return the base.
9668 * @param iSegReg The index of the segment register to use for
9669 * this access. The base and limits are checked.
9670 * @param GCPtrMem The address of the guest memory.
9671 * @param enmOpSize The effective operand size.
9672 */
9673IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9674 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9675{
9676 /*
9677 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9678 * little special:
9679 * - The two reads are done separately.
9680 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9681 * - We suspect the 386 to actually commit the limit before the base in
9682 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9683 * don't try to emulate this eccentric behavior, because it's not well
9684 * enough understood and rather hard to trigger.
9685 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9686 */
9687 VBOXSTRICTRC rcStrict;
9688 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9689 {
9690 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9691 if (rcStrict == VINF_SUCCESS)
9692 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9693 }
9694 else
9695 {
9696 uint32_t uTmp = 0; /* (Silence Visual C++'s 'maybe used uninitialized' warning.) */
9697 if (enmOpSize == IEMMODE_32BIT)
9698 {
9699 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9700 {
9701 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9702 if (rcStrict == VINF_SUCCESS)
9703 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9704 }
9705 else
9706 {
9707 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9708 if (rcStrict == VINF_SUCCESS)
9709 {
9710 *pcbLimit = (uint16_t)uTmp;
9711 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9712 }
9713 }
9714 if (rcStrict == VINF_SUCCESS)
9715 *pGCPtrBase = uTmp;
9716 }
9717 else
9718 {
9719 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9720 if (rcStrict == VINF_SUCCESS)
9721 {
9722 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9723 if (rcStrict == VINF_SUCCESS)
9724 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9725 }
9726 }
9727 }
9728 return rcStrict;
9729}
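
/*
 * Editor's note: a minimal usage sketch (not the actual IEMAllCImpl.cpp.h code)
 * of how an lgdt/lidt style implementation is expected to drive the helper
 * above.  The helper performs the guest reads and all segment checks; the
 * caller only forwards the limit/base pair to the register update.  The
 * iEffSeg, GCPtrEffSrc and enmEffOpSize names below are illustrative
 * assumptions, not identifiers defined in this file.
 *
 *     uint16_t cbLimit;
 *     RTGCPTR  GCPtrBase;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase,
 *                                                 iEffSeg, GCPtrEffSrc, enmEffOpSize);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         // ... commit cbLimit/GCPtrBase to the guest GDTR or IDTR ...
 *     }
 *     return rcStrict;
 */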
9730
9731
9732
9733/**
9734 * Stores a data byte.
9735 *
9736 * @returns Strict VBox status code.
9737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9738 * @param iSegReg The index of the segment register to use for
9739 * this access. The base and limits are checked.
9740 * @param GCPtrMem The address of the guest memory.
9741 * @param u8Value The value to store.
9742 */
9743IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9744{
9745 /* The lazy approach for now... */
9746 uint8_t *pu8Dst;
9747 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9748 if (rc == VINF_SUCCESS)
9749 {
9750 *pu8Dst = u8Value;
9751 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9752 }
9753 return rc;
9754}
9755
9756
9757#ifdef IEM_WITH_SETJMP
9758/**
9759 * Stores a data byte, longjmp on error.
9760 *
9761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9762 * @param iSegReg The index of the segment register to use for
9763 * this access. The base and limits are checked.
9764 * @param GCPtrMem The address of the guest memory.
9765 * @param u8Value The value to store.
9766 */
9767IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9768{
9769 /* The lazy approach for now... */
9770 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9771 *pu8Dst = u8Value;
9772 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9773}
9774#endif
9775
9776
9777/**
9778 * Stores a data word.
9779 *
9780 * @returns Strict VBox status code.
9781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9782 * @param iSegReg The index of the segment register to use for
9783 * this access. The base and limits are checked.
9784 * @param GCPtrMem The address of the guest memory.
9785 * @param u16Value The value to store.
9786 */
9787IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9788{
9789 /* The lazy approach for now... */
9790 uint16_t *pu16Dst;
9791 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9792 if (rc == VINF_SUCCESS)
9793 {
9794 *pu16Dst = u16Value;
9795 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9796 }
9797 return rc;
9798}
9799
9800
9801#ifdef IEM_WITH_SETJMP
9802/**
9803 * Stores a data word, longjmp on error.
9804 *
9805 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9806 * @param iSegReg The index of the segment register to use for
9807 * this access. The base and limits are checked.
9808 * @param GCPtrMem The address of the guest memory.
9809 * @param u16Value The value to store.
9810 */
9811IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9812{
9813 /* The lazy approach for now... */
9814 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9815 *pu16Dst = u16Value;
9816 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9817}
9818#endif
9819
9820
9821/**
9822 * Stores a data dword.
9823 *
9824 * @returns Strict VBox status code.
9825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9826 * @param iSegReg The index of the segment register to use for
9827 * this access. The base and limits are checked.
9828 * @param GCPtrMem The address of the guest memory.
9829 * @param u32Value The value to store.
9830 */
9831IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9832{
9833 /* The lazy approach for now... */
9834 uint32_t *pu32Dst;
9835 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9836 if (rc == VINF_SUCCESS)
9837 {
9838 *pu32Dst = u32Value;
9839 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9840 }
9841 return rc;
9842}
9843
9844
9845#ifdef IEM_WITH_SETJMP
9846/**
9847 * Stores a data dword, longjmp on error.
9848 *
9850 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9851 * @param iSegReg The index of the segment register to use for
9852 * this access. The base and limits are checked.
9853 * @param GCPtrMem The address of the guest memory.
9854 * @param u32Value The value to store.
9855 */
9856IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9857{
9858 /* The lazy approach for now... */
9859 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9860 *pu32Dst = u32Value;
9861 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9862}
9863#endif
9864
9865
9866/**
9867 * Stores a data qword.
9868 *
9869 * @returns Strict VBox status code.
9870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9871 * @param iSegReg The index of the segment register to use for
9872 * this access. The base and limits are checked.
9873 * @param GCPtrMem The address of the guest memory.
9874 * @param u64Value The value to store.
9875 */
9876IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9877{
9878 /* The lazy approach for now... */
9879 uint64_t *pu64Dst;
9880 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9881 if (rc == VINF_SUCCESS)
9882 {
9883 *pu64Dst = u64Value;
9884 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9885 }
9886 return rc;
9887}
9888
9889
9890#ifdef IEM_WITH_SETJMP
9891/**
9892 * Stores a data qword, longjmp on error.
9893 *
9894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9895 * @param iSegReg The index of the segment register to use for
9896 * this access. The base and limits are checked.
9897 * @param GCPtrMem The address of the guest memory.
9898 * @param u64Value The value to store.
9899 */
9900IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9901{
9902 /* The lazy approach for now... */
9903 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9904 *pu64Dst = u64Value;
9905 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9906}
9907#endif
9908
9909
9910/**
9911 * Stores a data dqword.
9912 *
9913 * @returns Strict VBox status code.
9914 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9915 * @param iSegReg The index of the segment register to use for
9916 * this access. The base and limits are checked.
9917 * @param GCPtrMem The address of the guest memory.
9918 * @param u128Value The value to store.
9919 */
9920IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9921{
9922 /* The lazy approach for now... */
9923 PRTUINT128U pu128Dst;
9924 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9925 if (rc == VINF_SUCCESS)
9926 {
9927 pu128Dst->au64[0] = u128Value.au64[0];
9928 pu128Dst->au64[1] = u128Value.au64[1];
9929 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9930 }
9931 return rc;
9932}
9933
9934
9935#ifdef IEM_WITH_SETJMP
9936/**
9937 * Stores a data dqword, longjmp on error.
9938 *
9939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9940 * @param iSegReg The index of the segment register to use for
9941 * this access. The base and limits are checked.
9942 * @param GCPtrMem The address of the guest memory.
9943 * @param u128Value The value to store.
9944 */
9945IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9946{
9947 /* The lazy approach for now... */
9948 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9949 pu128Dst->au64[0] = u128Value.au64[0];
9950 pu128Dst->au64[1] = u128Value.au64[1];
9951 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9952}
9953#endif
9954
9955
9956/**
9957 * Stores a data dqword, SSE aligned.
9958 *
9959 * @returns Strict VBox status code.
9960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9961 * @param iSegReg The index of the segment register to use for
9962 * this access. The base and limits are checked.
9963 * @param GCPtrMem The address of the guest memory.
9964 * @param u128Value The value to store.
9965 */
9966IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9967{
9968 /* The lazy approach for now... */
9969 if ( (GCPtrMem & 15)
9970 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9971 return iemRaiseGeneralProtectionFault0(pVCpu);
9972
9973 PRTUINT128U pu128Dst;
9974 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9975 if (rc == VINF_SUCCESS)
9976 {
9977 pu128Dst->au64[0] = u128Value.au64[0];
9978 pu128Dst->au64[1] = u128Value.au64[1];
9979 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9980 }
9981 return rc;
9982}
9983
9984
9985#ifdef IEM_WITH_SETJMP
9986/**
9987 * Stores a data dqword, SSE aligned, longjmp on error.
9988 *
9990 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9991 * @param iSegReg The index of the segment register to use for
9992 * this access. The base and limits are checked.
9993 * @param GCPtrMem The address of the guest memory.
9994 * @param u128Value The value to store.
9995 */
9996DECL_NO_INLINE(IEM_STATIC, void)
9997iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9998{
9999 /* The lazy approach for now... */
10000 if ( (GCPtrMem & 15) == 0
10001 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10002 {
10003 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10004 pu128Dst->au64[0] = u128Value.au64[0];
10005 pu128Dst->au64[1] = u128Value.au64[1];
10006 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10007 return;
10008 }
10009
10010 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10011 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10012}
10013#endif
10014
10015
10016/**
10017 * Stores a descriptor register (sgdt, sidt).
10018 *
10019 * @returns Strict VBox status code.
10020 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10021 * @param cbLimit The limit.
10022 * @param GCPtrBase The base address.
10023 * @param iSegReg The index of the segment register to use for
10024 * this access. The base and limits are checked.
10025 * @param GCPtrMem The address of the guest memory.
10026 */
10027IEM_STATIC VBOXSTRICTRC
10028iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10029{
10030 VBOXSTRICTRC rcStrict;
10031 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
10032 {
10033 Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n"));
10034 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
10035 }
10036
10037 /*
10038 * The SIDT and SGDT instructions actually store the data using two
10039 * independent writes. The instructions do not respond to opsize prefixes.
10040 */
10041 rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10042 if (rcStrict == VINF_SUCCESS)
10043 {
10044 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10045 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10046 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10047 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10048 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10049 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10050 else
10051 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10052 }
10053 return rcStrict;
10054}
10055
10056
10057/**
10058 * Pushes a word onto the stack.
10059 *
10060 * @returns Strict VBox status code.
10061 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10062 * @param u16Value The value to push.
10063 */
10064IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10065{
10066 /* Decrement the stack pointer. */
10067 uint64_t uNewRsp;
10068 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10069 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10070
10071 /* Write the word the lazy way. */
10072 uint16_t *pu16Dst;
10073 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10074 if (rc == VINF_SUCCESS)
10075 {
10076 *pu16Dst = u16Value;
10077 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10078 }
10079
10080 /* Commit the new RSP value unless an access handler made trouble. */
10081 if (rc == VINF_SUCCESS)
10082 pCtx->rsp = uNewRsp;
10083
10084 return rc;
10085}
10086
10087
10088/**
10089 * Pushes a dword onto the stack.
10090 *
10091 * @returns Strict VBox status code.
10092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10093 * @param u32Value The value to push.
10094 */
10095IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10096{
10097 /* Decrement the stack pointer. */
10098 uint64_t uNewRsp;
10099 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10100 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10101
10102 /* Write the dword the lazy way. */
10103 uint32_t *pu32Dst;
10104 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10105 if (rc == VINF_SUCCESS)
10106 {
10107 *pu32Dst = u32Value;
10108 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10109 }
10110
10111 /* Commit the new RSP value unless an access handler made trouble. */
10112 if (rc == VINF_SUCCESS)
10113 pCtx->rsp = uNewRsp;
10114
10115 return rc;
10116}
10117
10118
10119/**
10120 * Pushes a dword segment register value onto the stack.
10121 *
10122 * @returns Strict VBox status code.
10123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10124 * @param u32Value The value to push.
10125 */
10126IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10127{
10128 /* Decrement the stack pointer. */
10129 uint64_t uNewRsp;
10130 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10131 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10132
10133 VBOXSTRICTRC rc;
10134 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10135 {
10136 /* The recompiler writes a full dword. */
10137 uint32_t *pu32Dst;
10138 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10139 if (rc == VINF_SUCCESS)
10140 {
10141 *pu32Dst = u32Value;
10142 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10143 }
10144 }
10145 else
10146 {
10147 /* The Intel docs talk about zero extending the selector register
10148 value. My actual Intel CPU here might be zero extending the value
10149 but it still only writes the lower word... */
10150 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10151 * happens when crossing a page boundary: is the high word checked
10152 * for write accessibility or not? Probably it is. What about segment limits?
10153 * It appears this behavior is also shared with trap error codes.
10154 *
10155 * Docs indicate the behavior maybe changed with the Pentium or Pentium Pro. Check
10156 * on ancient hardware to see when it actually changed. */
10157 uint16_t *pu16Dst;
10158 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10159 if (rc == VINF_SUCCESS)
10160 {
10161 *pu16Dst = (uint16_t)u32Value;
10162 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10163 }
10164 }
10165
10166 /* Commit the new RSP value unless an access handler made trouble. */
10167 if (rc == VINF_SUCCESS)
10168 pCtx->rsp = uNewRsp;
10169
10170 return rc;
10171}
10172
10173
10174/**
10175 * Pushes a qword onto the stack.
10176 *
10177 * @returns Strict VBox status code.
10178 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10179 * @param u64Value The value to push.
10180 */
10181IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10182{
10183 /* Decrement the stack pointer. */
10184 uint64_t uNewRsp;
10185 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10186 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10187
10188 /* Write the qword the lazy way. */
10189 uint64_t *pu64Dst;
10190 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10191 if (rc == VINF_SUCCESS)
10192 {
10193 *pu64Dst = u64Value;
10194 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10195 }
10196
10197 /* Commit the new RSP value unless an access handler made trouble. */
10198 if (rc == VINF_SUCCESS)
10199 pCtx->rsp = uNewRsp;
10200
10201 return rc;
10202}
10203
10204
10205/**
10206 * Pops a word from the stack.
10207 *
10208 * @returns Strict VBox status code.
10209 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10210 * @param pu16Value Where to store the popped value.
10211 */
10212IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10213{
10214 /* Increment the stack pointer. */
10215 uint64_t uNewRsp;
10216 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10217 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10218
10219 /* Read the word the lazy way. */
10220 uint16_t const *pu16Src;
10221 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10222 if (rc == VINF_SUCCESS)
10223 {
10224 *pu16Value = *pu16Src;
10225 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10226
10227 /* Commit the new RSP value. */
10228 if (rc == VINF_SUCCESS)
10229 pCtx->rsp = uNewRsp;
10230 }
10231
10232 return rc;
10233}
10234
10235
10236/**
10237 * Pops a dword from the stack.
10238 *
10239 * @returns Strict VBox status code.
10240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10241 * @param pu32Value Where to store the popped value.
10242 */
10243IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10244{
10245 /* Increment the stack pointer. */
10246 uint64_t uNewRsp;
10247 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10248 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10249
10250 /* Read the dword the lazy way. */
10251 uint32_t const *pu32Src;
10252 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10253 if (rc == VINF_SUCCESS)
10254 {
10255 *pu32Value = *pu32Src;
10256 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10257
10258 /* Commit the new RSP value. */
10259 if (rc == VINF_SUCCESS)
10260 pCtx->rsp = uNewRsp;
10261 }
10262
10263 return rc;
10264}
10265
10266
10267/**
10268 * Pops a qword from the stack.
10269 *
10270 * @returns Strict VBox status code.
10271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10272 * @param pu64Value Where to store the popped value.
10273 */
10274IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10275{
10276 /* Increment the stack pointer. */
10277 uint64_t uNewRsp;
10278 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10279 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10280
10281 /* Read the qword the lazy way. */
10282 uint64_t const *pu64Src;
10283 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10284 if (rc == VINF_SUCCESS)
10285 {
10286 *pu64Value = *pu64Src;
10287 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10288
10289 /* Commit the new RSP value. */
10290 if (rc == VINF_SUCCESS)
10291 pCtx->rsp = uNewRsp;
10292 }
10293
10294 return rc;
10295}
10296
10297
10298/**
10299 * Pushes a word onto the stack, using a temporary stack pointer.
10300 *
10301 * @returns Strict VBox status code.
10302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10303 * @param u16Value The value to push.
10304 * @param pTmpRsp Pointer to the temporary stack pointer.
10305 */
10306IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10307{
10308 /* Decrement the stack pointer. */
10309 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10310 RTUINT64U NewRsp = *pTmpRsp;
10311 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10312
10313 /* Write the word the lazy way. */
10314 uint16_t *pu16Dst;
10315 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10316 if (rc == VINF_SUCCESS)
10317 {
10318 *pu16Dst = u16Value;
10319 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10320 }
10321
10322 /* Commit the new RSP value unless an access handler made trouble. */
10323 if (rc == VINF_SUCCESS)
10324 *pTmpRsp = NewRsp;
10325
10326 return rc;
10327}
10328
10329
10330/**
10331 * Pushes a dword onto the stack, using a temporary stack pointer.
10332 *
10333 * @returns Strict VBox status code.
10334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10335 * @param u32Value The value to push.
10336 * @param pTmpRsp Pointer to the temporary stack pointer.
10337 */
10338IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10339{
10340 /* Decrement the stack pointer. */
10341 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10342 RTUINT64U NewRsp = *pTmpRsp;
10343 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10344
10345 /* Write the dword the lazy way. */
10346 uint32_t *pu32Dst;
10347 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10348 if (rc == VINF_SUCCESS)
10349 {
10350 *pu32Dst = u32Value;
10351 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10352 }
10353
10354 /* Commit the new RSP value unless an access handler made trouble. */
10355 if (rc == VINF_SUCCESS)
10356 *pTmpRsp = NewRsp;
10357
10358 return rc;
10359}
10360
10361
10362/**
10363 * Pushes a qword onto the stack, using a temporary stack pointer.
10364 *
10365 * @returns Strict VBox status code.
10366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10367 * @param u64Value The value to push.
10368 * @param pTmpRsp Pointer to the temporary stack pointer.
10369 */
10370IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10371{
10372 /* Decrement the stack pointer. */
10373 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10374 RTUINT64U NewRsp = *pTmpRsp;
10375 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10376
10377 /* Write the qword the lazy way. */
10378 uint64_t *pu64Dst;
10379 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10380 if (rc == VINF_SUCCESS)
10381 {
10382 *pu64Dst = u64Value;
10383 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10384 }
10385
10386 /* Commit the new RSP value unless an access handler made trouble. */
10387 if (rc == VINF_SUCCESS)
10388 *pTmpRsp = NewRsp;
10389
10390 return rc;
10391}
10392
10393
10394/**
10395 * Pops a word from the stack, using a temporary stack pointer.
10396 *
10397 * @returns Strict VBox status code.
10398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10399 * @param pu16Value Where to store the popped value.
10400 * @param pTmpRsp Pointer to the temporary stack pointer.
10401 */
10402IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10403{
10404 /* Increment the stack pointer. */
10405 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10406 RTUINT64U NewRsp = *pTmpRsp;
10407 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10408
10409 /* Read the word the lazy way. */
10410 uint16_t const *pu16Src;
10411 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10412 if (rc == VINF_SUCCESS)
10413 {
10414 *pu16Value = *pu16Src;
10415 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10416
10417 /* Commit the new RSP value. */
10418 if (rc == VINF_SUCCESS)
10419 *pTmpRsp = NewRsp;
10420 }
10421
10422 return rc;
10423}
10424
10425
10426/**
10427 * Pops a dword from the stack, using a temporary stack pointer.
10428 *
10429 * @returns Strict VBox status code.
10430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10431 * @param pu32Value Where to store the popped value.
10432 * @param pTmpRsp Pointer to the temporary stack pointer.
10433 */
10434IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10435{
10436 /* Increment the stack pointer. */
10437 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10438 RTUINT64U NewRsp = *pTmpRsp;
10439 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10440
10441 /* Read the dword the lazy way. */
10442 uint32_t const *pu32Src;
10443 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10444 if (rc == VINF_SUCCESS)
10445 {
10446 *pu32Value = *pu32Src;
10447 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10448
10449 /* Commit the new RSP value. */
10450 if (rc == VINF_SUCCESS)
10451 *pTmpRsp = NewRsp;
10452 }
10453
10454 return rc;
10455}
10456
10457
10458/**
10459 * Pops a qword from the stack, using a temporary stack pointer.
10460 *
10461 * @returns Strict VBox status code.
10462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10463 * @param pu64Value Where to store the popped value.
10464 * @param pTmpRsp Pointer to the temporary stack pointer.
10465 */
10466IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10467{
10468 /* Increment the stack pointer. */
10469 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10470 RTUINT64U NewRsp = *pTmpRsp;
10471 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10472
10473 /* Read the qword the lazy way. */
10474 uint64_t const *pu64Src;
10475 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10476 if (rcStrict == VINF_SUCCESS)
10477 {
10478 *pu64Value = *pu64Src;
10479 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10480
10481 /* Commit the new RSP value. */
10482 if (rcStrict == VINF_SUCCESS)
10483 *pTmpRsp = NewRsp;
10484 }
10485
10486 return rcStrict;
10487}
10488
10489
10490/**
10491 * Begin a special stack push (used by interrupts, exceptions and such).
10492 *
10493 * This will raise \#SS or \#PF if appropriate.
10494 *
10495 * @returns Strict VBox status code.
10496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10497 * @param cbMem The number of bytes to push onto the stack.
10498 * @param ppvMem Where to return the pointer to the stack memory.
10499 * As with the other memory functions this could be
10500 * direct access or bounce buffered access, so
10501 * don't commit the register until the commit call
10502 * succeeds.
10503 * @param puNewRsp Where to return the new RSP value. This must be
10504 * passed unchanged to
10505 * iemMemStackPushCommitSpecial().
10506 */
10507IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10508{
10509 Assert(cbMem < UINT8_MAX);
10510 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10511 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10512 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10513}
10514
10515
10516/**
10517 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10518 *
10519 * This will update the rSP.
10520 *
10521 * @returns Strict VBox status code.
10522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10523 * @param pvMem The pointer returned by
10524 * iemMemStackPushBeginSpecial().
10525 * @param uNewRsp The new RSP value returned by
10526 * iemMemStackPushBeginSpecial().
10527 */
10528IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10529{
10530 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10531 if (rcStrict == VINF_SUCCESS)
10532 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10533 return rcStrict;
10534}
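
/*
 * Editor's note: a minimal sketch of the begin/commit protocol documented
 * above (hypothetical caller, e.g. exception dispatching code; cbFrame is an
 * illustrative name).  No guest register is modified until the commit call
 * succeeds.
 *
 *     void      *pvStackFrame;
 *     uint64_t   uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbFrame, &pvStackFrame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // ... write the frame contents into pvStackFrame ...
 *     rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackFrame, uNewRsp);  // updates RSP on success
 */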
10535
10536
10537/**
10538 * Begin a special stack pop (used by iret, retf and such).
10539 *
10540 * This will raise \#SS or \#PF if appropriate.
10541 *
10542 * @returns Strict VBox status code.
10543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10544 * @param cbMem The number of bytes to pop from the stack.
10545 * @param ppvMem Where to return the pointer to the stack memory.
10546 * @param puNewRsp Where to return the new RSP value. This must be
10547 * assigned to CPUMCTX::rsp manually some time
10548 * after iemMemStackPopDoneSpecial() has been
10549 * called.
10550 */
10551IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10552{
10553 Assert(cbMem < UINT8_MAX);
10554 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10555 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10556 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10557}
10558
10559
10560/**
10561 * Continue a special stack pop (used by iret and retf).
10562 *
10563 * This will raise \#SS or \#PF if appropriate.
10564 *
10565 * @returns Strict VBox status code.
10566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10567 * @param cbMem The number of bytes to pop from the stack.
10568 * @param ppvMem Where to return the pointer to the stack memory.
10569 * @param puNewRsp Where to return the new RSP value. This must be
10570 * assigned to CPUMCTX::rsp manually some time
10571 * after iemMemStackPopDoneSpecial() has been
10572 * called.
10573 */
10574IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10575{
10576 Assert(cbMem < UINT8_MAX);
10577 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10578 RTUINT64U NewRsp;
10579 NewRsp.u = *puNewRsp;
10580 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10581 *puNewRsp = NewRsp.u;
10582 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10583}
10584
10585
10586/**
10587 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10588 * iemMemStackPopContinueSpecial).
10589 *
10590 * The caller will manually commit the rSP.
10591 *
10592 * @returns Strict VBox status code.
10593 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10594 * @param pvMem The pointer returned by
10595 * iemMemStackPopBeginSpecial() or
10596 * iemMemStackPopContinueSpecial().
10597 */
10598IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10599{
10600 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10601}
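
/*
 * Editor's note: a minimal sketch of the matching pop protocol (hypothetical
 * iret/retf style caller; cbFrame is an illustrative name).  Unlike the push
 * commit above, the new RSP must be assigned by hand after the done call, as
 * the function documentation states.
 *
 *     void const *pvFrame;
 *     uint64_t    uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, cbFrame, &pvFrame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // ... read the frame contents from pvFrame ...
 *     rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
 *     if (rcStrict == VINF_SUCCESS)
 *         IEM_GET_CTX(pVCpu)->rsp = uNewRsp;  // committed manually by the caller
 */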
10602
10603
10604/**
10605 * Fetches a system table byte.
10606 *
10607 * @returns Strict VBox status code.
10608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10609 * @param pbDst Where to return the byte.
10610 * @param iSegReg The index of the segment register to use for
10611 * this access. The base and limits are checked.
10612 * @param GCPtrMem The address of the guest memory.
10613 */
10614IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10615{
10616 /* The lazy approach for now... */
10617 uint8_t const *pbSrc;
10618 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10619 if (rc == VINF_SUCCESS)
10620 {
10621 *pbDst = *pbSrc;
10622 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10623 }
10624 return rc;
10625}
10626
10627
10628/**
10629 * Fetches a system table word.
10630 *
10631 * @returns Strict VBox status code.
10632 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10633 * @param pu16Dst Where to return the word.
10634 * @param iSegReg The index of the segment register to use for
10635 * this access. The base and limits are checked.
10636 * @param GCPtrMem The address of the guest memory.
10637 */
10638IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10639{
10640 /* The lazy approach for now... */
10641 uint16_t const *pu16Src;
10642 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10643 if (rc == VINF_SUCCESS)
10644 {
10645 *pu16Dst = *pu16Src;
10646 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10647 }
10648 return rc;
10649}
10650
10651
10652/**
10653 * Fetches a system table dword.
10654 *
10655 * @returns Strict VBox status code.
10656 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10657 * @param pu32Dst Where to return the dword.
10658 * @param iSegReg The index of the segment register to use for
10659 * this access. The base and limits are checked.
10660 * @param GCPtrMem The address of the guest memory.
10661 */
10662IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10663{
10664 /* The lazy approach for now... */
10665 uint32_t const *pu32Src;
10666 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10667 if (rc == VINF_SUCCESS)
10668 {
10669 *pu32Dst = *pu32Src;
10670 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10671 }
10672 return rc;
10673}
10674
10675
10676/**
10677 * Fetches a system table qword.
10678 *
10679 * @returns Strict VBox status code.
10680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10681 * @param pu64Dst Where to return the qword.
10682 * @param iSegReg The index of the segment register to use for
10683 * this access. The base and limits are checked.
10684 * @param GCPtrMem The address of the guest memory.
10685 */
10686IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10687{
10688 /* The lazy approach for now... */
10689 uint64_t const *pu64Src;
10690 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10691 if (rc == VINF_SUCCESS)
10692 {
10693 *pu64Dst = *pu64Src;
10694 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10695 }
10696 return rc;
10697}
10698
10699
10700/**
10701 * Fetches a descriptor table entry with caller specified error code.
10702 *
10703 * @returns Strict VBox status code.
10704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10705 * @param pDesc Where to return the descriptor table entry.
10706 * @param uSel The selector whose table entry to fetch.
10707 * @param uXcpt The exception to raise on table lookup error.
10708 * @param uErrorCode The error code associated with the exception.
10709 */
10710IEM_STATIC VBOXSTRICTRC
10711iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10712{
10713 AssertPtr(pDesc);
10714 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10715
10716 /** @todo did the 286 require all 8 bytes to be accessible? */
10717 /*
10718 * Get the selector table base and check bounds.
10719 */
10720 RTGCPTR GCPtrBase;
10721 if (uSel & X86_SEL_LDT)
10722 {
10723 if ( !pCtx->ldtr.Attr.n.u1Present
10724 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10725 {
10726 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10727 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10728 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10729 uErrorCode, 0);
10730 }
10731
10732 Assert(pCtx->ldtr.Attr.n.u1Present);
10733 GCPtrBase = pCtx->ldtr.u64Base;
10734 }
10735 else
10736 {
10737 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10738 {
10739 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10740 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10741 uErrorCode, 0);
10742 }
10743 GCPtrBase = pCtx->gdtr.pGdt;
10744 }
10745
10746 /*
10747 * Read the legacy descriptor and maybe the long mode extensions if
10748 * required.
10749 */
10750 VBOXSTRICTRC rcStrict;
10751 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10752 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10753 else
10754 {
10755 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10756 if (rcStrict == VINF_SUCCESS)
10757 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10758 if (rcStrict == VINF_SUCCESS)
10759 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10760 if (rcStrict == VINF_SUCCESS)
10761 pDesc->Legacy.au16[3] = 0;
10762 else
10763 return rcStrict;
10764 }
10765
10766 if (rcStrict == VINF_SUCCESS)
10767 {
10768 if ( !IEM_IS_LONG_MODE(pVCpu)
10769 || pDesc->Legacy.Gen.u1DescType)
10770 pDesc->Long.au64[1] = 0;
10771 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10772 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10773 else
10774 {
10775 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10776 /** @todo is this the right exception? */
10777 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10778 }
10779 }
10780 return rcStrict;
10781}
10782
10783
10784/**
10785 * Fetches a descriptor table entry.
10786 *
10787 * @returns Strict VBox status code.
10788 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10789 * @param pDesc Where to return the descriptor table entry.
10790 * @param uSel The selector whose table entry to fetch.
10791 * @param uXcpt The exception to raise on table lookup error.
10792 */
10793IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10794{
10795 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10796}
10797
10798
10799/**
10800 * Fakes a long mode stack selector for SS = 0.
10801 *
10802 * @param pDescSs Where to return the fake stack descriptor.
10803 * @param uDpl The DPL we want.
10804 */
10805IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10806{
10807 pDescSs->Long.au64[0] = 0;
10808 pDescSs->Long.au64[1] = 0;
10809 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10810 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10811 pDescSs->Long.Gen.u2Dpl = uDpl;
10812 pDescSs->Long.Gen.u1Present = 1;
10813 pDescSs->Long.Gen.u1Long = 1;
10814}
10815
10816
10817/**
10818 * Marks the selector descriptor as accessed (only non-system descriptors).
10819 *
10820 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10821 * will therefore skip the limit checks.
10822 *
10823 * @returns Strict VBox status code.
10824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10825 * @param uSel The selector.
10826 */
10827IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10828{
10829 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10830
10831 /*
10832 * Get the selector table base and calculate the entry address.
10833 */
10834 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10835 ? pCtx->ldtr.u64Base
10836 : pCtx->gdtr.pGdt;
10837 GCPtr += uSel & X86_SEL_MASK;
10838
10839 /*
10840 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10841 * ugly stuff to avoid this. This will make sure it's an atomic access
10842 * as well as more or less remove any question about 8-bit or 32-bit accesses.
10843 */
10844 VBOXSTRICTRC rcStrict;
10845 uint32_t volatile *pu32;
10846 if ((GCPtr & 3) == 0)
10847 {
10848 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
10849 GCPtr += 2 + 2;
10850 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10851 if (rcStrict != VINF_SUCCESS)
10852 return rcStrict;
10853 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10854 }
10855 else
10856 {
10857 /* The misaligned GDT/LDT case, map the whole thing. */
10858 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10859 if (rcStrict != VINF_SUCCESS)
10860 return rcStrict;
10861 switch ((uintptr_t)pu32 & 3)
10862 {
10863 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10864 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10865 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10866 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10867 }
10868 }
10869
10870 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10871}
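
/*
 * Editor's note: a minimal sketch (assumed segment-loading caller) of how the
 * descriptor fetch and accessed-bit helpers above are meant to be combined:
 *
 *     IEMSELDESC Desc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // ... check the present bit, type and DPL ...
 *     if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *     {
 *         rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
 *         if (rcStrict != VINF_SUCCESS)
 *             return rcStrict;
 *         Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;  // keep the local copy in sync
 *     }
 */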
10872
10873/** @} */
10874
10875
10876/*
10877 * Include the C/C++ implementation of the instructions.
10878 */
10879#include "IEMAllCImpl.cpp.h"
10880
10881
10882
10883/** @name "Microcode" macros.
10884 *
10885 * The idea is that we should be able to use the same code to interpret
10886 * instructions as well as to recompile them. Thus this obfuscation.
10887 *
10888 * @{
10889 */
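/*
 * Editor's note: an illustrative (not verbatim) example of how the decoder
 * functions string these IEM_MC_* macros together.  IEM_MC_FETCH_GREG_U16 and
 * IEM_MC_PUSH_U16 are assumed here, and iReg is assumed to have been decoded
 * from the ModR/M byte by the caller.
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, iReg);
 *     IEM_MC_PUSH_U16(u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */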
10890#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10891#define IEM_MC_END() }
10892#define IEM_MC_PAUSE() do {} while (0)
10893#define IEM_MC_CONTINUE() do {} while (0)
10894
10895/** Internal macro. */
10896#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10897 do \
10898 { \
10899 VBOXSTRICTRC rcStrict2 = a_Expr; \
10900 if (rcStrict2 != VINF_SUCCESS) \
10901 return rcStrict2; \
10902 } while (0)
10903
10904
10905#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10906#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10907#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10908#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10909#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10910#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10911#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10912#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10913#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10914 do { \
10915 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10916 return iemRaiseDeviceNotAvailable(pVCpu); \
10917 } while (0)
10918#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
10919 do { \
10920 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
10921 return iemRaiseDeviceNotAvailable(pVCpu); \
10922 } while (0)
10923#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10924 do { \
10925 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10926 return iemRaiseMathFault(pVCpu); \
10927 } while (0)
10928#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
10929 do { \
10930 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10931 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10932 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
10933 return iemRaiseUndefinedOpcode(pVCpu); \
10934 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10935 return iemRaiseDeviceNotAvailable(pVCpu); \
10936 } while (0)
10937#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10938 do { \
10939 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10940 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10941 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10942 return iemRaiseUndefinedOpcode(pVCpu); \
10943 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10944 return iemRaiseDeviceNotAvailable(pVCpu); \
10945 } while (0)
10946#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10947 do { \
10948 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10949 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10950 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10951 return iemRaiseUndefinedOpcode(pVCpu); \
10952 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10953 return iemRaiseDeviceNotAvailable(pVCpu); \
10954 } while (0)
10955#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10956 do { \
10957 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10958 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10959 return iemRaiseUndefinedOpcode(pVCpu); \
10960 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10961 return iemRaiseDeviceNotAvailable(pVCpu); \
10962 } while (0)
10963#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10964 do { \
10965 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10966 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10967 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10968 return iemRaiseUndefinedOpcode(pVCpu); \
10969 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10970 return iemRaiseDeviceNotAvailable(pVCpu); \
10971 } while (0)
10972#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10973 do { \
10974 if (pVCpu->iem.s.uCpl != 0) \
10975 return iemRaiseGeneralProtectionFault0(pVCpu); \
10976 } while (0)
10977#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
10978 do { \
10979 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
10980 else return iemRaiseGeneralProtectionFault0(pVCpu); \
10981 } while (0)
10982
10983
10984#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10985#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10986#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10987#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10988#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10989#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10990#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10991 uint32_t a_Name; \
10992 uint32_t *a_pName = &a_Name
10993#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10994 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10995
10996#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10997#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10998
10999#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11000#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11001#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11002#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11003#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11004#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11005#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11006#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11007#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11008#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11009#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11010#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11011#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11012#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11013#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11014#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11015#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11016#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11017#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11018#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11019#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11020#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11021#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11022#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11023#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11024#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11025#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11026#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11027#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11028/** @note Not for IOPL or IF testing or modification. */
11029#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11030#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11031#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11032#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11033
11034#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11035#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11036#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11037#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11038#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11039#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11040#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11041#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11042#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11043#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11044#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11045 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11046
11047#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11048#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11049/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11050 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11051#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11052#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11053/** @note Not for IOPL or IF testing or modification. */
11054#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
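
/*
 * Illustrative sketch (not compiled): a minimal "xchg ax, cx" style body using
 * only the general purpose register fetch/store macros above.  The fixed
 * register indices are placeholders; real decoders take them from the opcode.
 */
#if 0
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint16_t, u16Tmp1);
IEM_MC_LOCAL(uint16_t, u16Tmp2);
IEM_MC_FETCH_GREG_U16(u16Tmp1, X86_GREG_xAX);
IEM_MC_FETCH_GREG_U16(u16Tmp2, X86_GREG_xCX);
IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp2);
IEM_MC_STORE_GREG_U16(X86_GREG_xCX, u16Tmp1);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif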
11055
11056#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11057#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11058#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11059 do { \
11060 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11061 *pu32Reg += (a_u32Value); \
11062 pu32Reg[1] = 0; /* implicitly clear the high half (bits 63:32). */ \
11063 } while (0)
11064#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11065
11066#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11067#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11068#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11069 do { \
11070 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11071 *pu32Reg -= (a_u32Value); \
11072 pu32Reg[1] = 0; /* implicitly clear the high half (bits 63:32). */ \
11073 } while (0)
11074#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11075#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11076
11077#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11078#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11079#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11080#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11081#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11082#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11083#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11084
11085#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11086#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11087#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11088#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11089
11090#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11091#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11092#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11093
11094#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11095#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11096#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11097
11098#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11099#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11100#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11101
11102#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11103#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11104#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11105
11106#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11107
11108#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11109
11110#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11111#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11112#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11113 do { \
11114 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11115 *pu32Reg &= (a_u32Value); \
11116 pu32Reg[1] = 0; /* implicitly clear the high half (bits 63:32). */ \
11117 } while (0)
11118#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11119
11120#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11121#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11122#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11123 do { \
11124 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11125 *pu32Reg |= (a_u32Value); \
11126 pu32Reg[1] = 0; /* implicitly clear the high half (bits 63:32). */ \
11127 } while (0)
11128#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11129
11130
11131/** @note Not for IOPL or IF modification. */
11132#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11133/** @note Not for IOPL or IF modification. */
11134#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11135/** @note Not for IOPL or IF modification. */
11136#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11137
11138#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11139
11140/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0xff so all registers are valid) if necessary. */
11141#define IEM_MC_FPU_TO_MMX_MODE() do { \
11142 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11143 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11144 } while (0)
11145
11146#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11147 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11148#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11149 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11150#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
11151 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
11152#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
11153 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
11154#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
11155 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11156#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11157 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11158#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11159 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11160
11161#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11162 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11163 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11164 } while (0)
11165#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11166 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11167#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11168 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11169#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11170 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11171#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11172 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11173 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11174 } while (0)
11175#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11176 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11177#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11178 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11179 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11180 } while (0)
11181#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11182 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11183#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11184 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11185 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11186 } while (0)
11187#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11188 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11189#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11190 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11191#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11192 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11193#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11194 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11195#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11196 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11197 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11198 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11199 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11200 } while (0)
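
/*
 * Illustrative sketch (not compiled): a register-to-register 64-bit XMM move
 * (movq Vq,Uq style) built from the XREG accessors above.  The register
 * indices are placeholders and IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE is
 * defined further down in this group.
 */
#if 0
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(uint64_t, u64Tmp);
IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
IEM_MC_FETCH_XREG_U64(u64Tmp, 1 /*iXRegSrc, placeholder*/);
IEM_MC_STORE_XREG_U64_ZX_U128(0 /*iXRegDst, placeholder*/, u64Tmp);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif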
11201
11202#ifndef IEM_WITH_SETJMP
11203# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11204 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11205# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11206 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11207# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11208 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11209#else
11210# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11211 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11212# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11213 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11214# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11215 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11216#endif
11217
11218#ifndef IEM_WITH_SETJMP
11219# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11220 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11221# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11222 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11223# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11224 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11225#else
11226# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11227 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11228# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11229 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11230# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11231 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11232#endif
11233
11234#ifndef IEM_WITH_SETJMP
11235# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11236 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11237# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11238 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11239# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11240 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11241#else
11242# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11243 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11244# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11245 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11246# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11247 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11248#endif
11249
11250#ifdef SOME_UNUSED_FUNCTION
11251# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11252 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11253#endif
11254
11255#ifndef IEM_WITH_SETJMP
11256# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11257 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11258# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11259 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11260# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11261 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11262# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11263 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11264#else
11265# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11266 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11267# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11268 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11269# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11270 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11271# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11272 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11273#endif
11274
11275#ifndef IEM_WITH_SETJMP
11276# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11277 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11278# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11279 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11280# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11281 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11282#else
11283# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11284 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11285# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11286 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11287# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11288 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11289#endif
11290
11291#ifndef IEM_WITH_SETJMP
11292# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11293 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11294# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11295 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11296#else
11297# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11298 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11299# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11300 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11301#endif
11302
11303
11304
11305#ifndef IEM_WITH_SETJMP
11306# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11307 do { \
11308 uint8_t u8Tmp; \
11309 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11310 (a_u16Dst) = u8Tmp; \
11311 } while (0)
11312# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11313 do { \
11314 uint8_t u8Tmp; \
11315 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11316 (a_u32Dst) = u8Tmp; \
11317 } while (0)
11318# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11319 do { \
11320 uint8_t u8Tmp; \
11321 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11322 (a_u64Dst) = u8Tmp; \
11323 } while (0)
11324# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11325 do { \
11326 uint16_t u16Tmp; \
11327 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11328 (a_u32Dst) = u16Tmp; \
11329 } while (0)
11330# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11331 do { \
11332 uint16_t u16Tmp; \
11333 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11334 (a_u64Dst) = u16Tmp; \
11335 } while (0)
11336# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11337 do { \
11338 uint32_t u32Tmp; \
11339 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11340 (a_u64Dst) = u32Tmp; \
11341 } while (0)
11342#else /* IEM_WITH_SETJMP */
11343# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11344 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11345# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11346 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11347# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11348 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11349# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11350 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11351# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11352 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11353# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11354 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11355#endif /* IEM_WITH_SETJMP */
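
/*
 * Illustrative sketch (not compiled): a "movzx eax, byte [mem]" style load
 * using the zero-extending fetch macros above.  bRm is assumed to come from
 * the decoder, the destination register is a placeholder, and
 * IEM_MC_CALC_RM_EFF_ADDR is defined a bit further down.
 */
#if 0
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(uint32_t, u32Value);
IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
IEM_MC_STORE_GREG_U32(X86_GREG_xAX /*placeholder*/, u32Value);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif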
11356
11357#ifndef IEM_WITH_SETJMP
11358# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11359 do { \
11360 uint8_t u8Tmp; \
11361 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11362 (a_u16Dst) = (int8_t)u8Tmp; \
11363 } while (0)
11364# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11365 do { \
11366 uint8_t u8Tmp; \
11367 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11368 (a_u32Dst) = (int8_t)u8Tmp; \
11369 } while (0)
11370# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11371 do { \
11372 uint8_t u8Tmp; \
11373 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11374 (a_u64Dst) = (int8_t)u8Tmp; \
11375 } while (0)
11376# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11377 do { \
11378 uint16_t u16Tmp; \
11379 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11380 (a_u32Dst) = (int16_t)u16Tmp; \
11381 } while (0)
11382# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11383 do { \
11384 uint16_t u16Tmp; \
11385 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11386 (a_u64Dst) = (int16_t)u16Tmp; \
11387 } while (0)
11388# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11389 do { \
11390 uint32_t u32Tmp; \
11391 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11392 (a_u64Dst) = (int32_t)u32Tmp; \
11393 } while (0)
11394#else /* IEM_WITH_SETJMP */
11395# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11396 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11397# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11398 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11399# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11400 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11401# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11402 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11403# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11404 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11405# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11406 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11407#endif /* IEM_WITH_SETJMP */
11408
11409#ifndef IEM_WITH_SETJMP
11410# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11411 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11412# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11413 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11414# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11415 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11416# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11417 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11418#else
11419# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11420 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11421# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11422 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11423# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11424 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11425# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11426 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11427#endif
11428
11429#ifndef IEM_WITH_SETJMP
11430# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11431 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11432# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11433 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11434# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11435 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11436# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11437 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11438#else
11439# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11440 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11441# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11442 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11443# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11444 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11445# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11446 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11447#endif
11448
11449#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11450#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11451#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11452#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11453#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11454#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11455#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11456 do { \
11457 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11458 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11459 } while (0)
11460
11461#ifndef IEM_WITH_SETJMP
11462# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11463 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11464# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11465 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11466#else
11467# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11468 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11469# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11470 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11471#endif
11472
11473
11474#define IEM_MC_PUSH_U16(a_u16Value) \
11475 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11476#define IEM_MC_PUSH_U32(a_u32Value) \
11477 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11478#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11479 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11480#define IEM_MC_PUSH_U64(a_u64Value) \
11481 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11482
11483#define IEM_MC_POP_U16(a_pu16Value) \
11484 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11485#define IEM_MC_POP_U32(a_pu32Value) \
11486 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11487#define IEM_MC_POP_U64(a_pu64Value) \
11488 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
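
/*
 * Illustrative sketch (not compiled): a "push ax" style body using the stack
 * helpers above; a real decoder would switch on the effective operand size first.
 */
#if 0
IEM_MC_BEGIN(0, 1);
IEM_MC_LOCAL(uint16_t, u16Value);
IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
IEM_MC_PUSH_U16(u16Value);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif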
11489
11490/** Maps guest memory for direct or bounce buffered access.
11491 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11492 * @remarks May return.
11493 */
11494#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11495 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11496
11497/** Maps guest memory for direct or bounce buffered access.
11498 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11499 * @remarks May return.
11500 */
11501#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11502 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11503
11504/** Commits the memory and unmaps the guest memory.
11505 * @remarks May return.
11506 */
11507#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11508 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11509
11510/** Commits the memory and unmaps the guest memory, unless the FPU status word
11511 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11512 * that would cause the FPU store not to be committed.
11513 *
11514 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11515 * store, while \#P will not.
11516 *
11517 * @remarks May in theory return - for now.
11518 */
11519#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11520 do { \
11521 if ( !(a_u16FSW & X86_FSW_ES) \
11522 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11523 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11524 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11525 } while (0)
11526
11527/** Calculate efficient address from R/M. */
11528#ifndef IEM_WITH_SETJMP
11529# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11530 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11531#else
11532# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11533 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11534#endif
11535
11536#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11537#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11538#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11539#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11540#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11541#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11542#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
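
/*
 * Illustrative sketch (not compiled): a locked read-modify-write on a memory
 * operand, combining IEM_MC_MEM_MAP / IEM_MC_MEM_COMMIT_AND_UNMAP with an
 * assembly worker call.  bRm is assumed to come from the decoder, the source
 * register index is a placeholder, and iemAImpl_add_u32_locked merely follows
 * the iemAImpl_*_locked naming pattern of the assembly helpers.
 */
#if 0
IEM_MC_BEGIN(3, 2);
IEM_MC_ARG(uint32_t *,          pu32Dst,            0);
IEM_MC_ARG(uint32_t,            u32Src,             1);
IEM_MC_ARG_LOCAL_EFLAGS(        pEFlags, EFlags,    2);
IEM_MC_LOCAL(RTGCPTR,           GCPtrEffDst);

IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
IEM_MC_FETCH_GREG_U32(u32Src, X86_GREG_xCX /*placeholder*/);
IEM_MC_FETCH_EFLAGS(EFlags);
IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32_locked, pu32Dst, u32Src, pEFlags);
IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
IEM_MC_COMMIT_EFLAGS(EFlags);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif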
11543
11544/**
11545 * Defers the rest of the instruction emulation to a C implementation routine
11546 * and returns, only taking the standard parameters.
11547 *
11548 * @param a_pfnCImpl The pointer to the C routine.
11549 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11550 */
11551#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11552
11553/**
11554 * Defers the rest of instruction emulation to a C implementation routine and
11555 * returns, taking one argument in addition to the standard ones.
11556 *
11557 * @param a_pfnCImpl The pointer to the C routine.
11558 * @param a0 The argument.
11559 */
11560#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11561
11562/**
11563 * Defers the rest of the instruction emulation to a C implementation routine
11564 * and returns, taking two arguments in addition to the standard ones.
11565 *
11566 * @param a_pfnCImpl The pointer to the C routine.
11567 * @param a0 The first extra argument.
11568 * @param a1 The second extra argument.
11569 */
11570#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11571
11572/**
11573 * Defers the rest of the instruction emulation to a C implementation routine
11574 * and returns, taking three arguments in addition to the standard ones.
11575 *
11576 * @param a_pfnCImpl The pointer to the C routine.
11577 * @param a0 The first extra argument.
11578 * @param a1 The second extra argument.
11579 * @param a2 The third extra argument.
11580 */
11581#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11582
11583/**
11584 * Defers the rest of the instruction emulation to a C implementation routine
11585 * and returns, taking four arguments in addition to the standard ones.
11586 *
11587 * @param a_pfnCImpl The pointer to the C routine.
11588 * @param a0 The first extra argument.
11589 * @param a1 The second extra argument.
11590 * @param a2 The third extra argument.
11591 * @param a3 The fourth extra argument.
11592 */
11593#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11594
11595/**
11596 * Defers the rest of the instruction emulation to a C implementation routine
11597 * and returns, taking five arguments in addition to the standard ones.
11598 *
11599 * @param a_pfnCImpl The pointer to the C routine.
11600 * @param a0 The first extra argument.
11601 * @param a1 The second extra argument.
11602 * @param a2 The third extra argument.
11603 * @param a3 The fourth extra argument.
11604 * @param a4 The fifth extra argument.
11605 */
11606#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11607
11608/**
11609 * Defers the entire instruction emulation to a C implementation routine and
11610 * returns, only taking the standard parameters.
11611 *
11612 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11613 *
11614 * @param a_pfnCImpl The pointer to the C routine.
11615 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11616 */
11617#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11618
11619/**
11620 * Defers the entire instruction emulation to a C implementation routine and
11621 * returns, taking one argument in addition to the standard ones.
11622 *
11623 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11624 *
11625 * @param a_pfnCImpl The pointer to the C routine.
11626 * @param a0 The argument.
11627 */
11628#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11629
11630/**
11631 * Defers the entire instruction emulation to a C implementation routine and
11632 * returns, taking two arguments in addition to the standard ones.
11633 *
11634 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11635 *
11636 * @param a_pfnCImpl The pointer to the C routine.
11637 * @param a0 The first extra argument.
11638 * @param a1 The second extra argument.
11639 */
11640#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11641
11642/**
11643 * Defers the entire instruction emulation to a C implementation routine and
11644 * returns, taking three arguments in addition to the standard ones.
11645 *
11646 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11647 *
11648 * @param a_pfnCImpl The pointer to the C routine.
11649 * @param a0 The first extra argument.
11650 * @param a1 The second extra argument.
11651 * @param a2 The third extra argument.
11652 */
11653#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
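
/*
 * Illustrative sketch (not compiled): deferring a whole instruction to a C
 * implementation routine.  FNIEMOP_DEF, IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX
 * and iemCImpl_hlt are assumed to be declared elsewhere in IEM.
 */
#if 0
FNIEMOP_DEF(iemOp_example_hlt)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
}
#endif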
11654
11655/**
11656 * Calls a FPU assembly implementation taking one visible argument.
11657 *
11658 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11659 * @param a0 The first extra argument.
11660 */
11661#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11662 do { \
11663 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11664 } while (0)
11665
11666/**
11667 * Calls a FPU assembly implementation taking two visible arguments.
11668 *
11669 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11670 * @param a0 The first extra argument.
11671 * @param a1 The second extra argument.
11672 */
11673#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11674 do { \
11675 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11676 } while (0)
11677
11678/**
11679 * Calls a FPU assembly implementation taking three visible arguments.
11680 *
11681 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11682 * @param a0 The first extra argument.
11683 * @param a1 The second extra argument.
11684 * @param a2 The third extra argument.
11685 */
11686#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11687 do { \
11688 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11689 } while (0)
11690
11691#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11692 do { \
11693 (a_FpuData).FSW = (a_FSW); \
11694 (a_FpuData).r80Result = *(a_pr80Value); \
11695 } while (0)
11696
11697/** Pushes FPU result onto the stack. */
11698#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11699 iemFpuPushResult(pVCpu, &a_FpuData)
11700/** Pushes FPU result onto the stack and sets the FPUDP. */
11701#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11702 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11703
11704/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
11705#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11706 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11707
11708/** Stores FPU result in a stack register. */
11709#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11710 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11711/** Stores FPU result in a stack register and pops the stack. */
11712#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11713 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11714/** Stores FPU result in a stack register and sets the FPUDP. */
11715#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11716 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11717/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11718 * stack. */
11719#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11720 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11721
11722/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11723#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11724 iemFpuUpdateOpcodeAndIp(pVCpu)
11725/** Free a stack register (for FFREE and FFREEP). */
11726#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11727 iemFpuStackFree(pVCpu, a_iStReg)
11728/** Increment the FPU stack pointer. */
11729#define IEM_MC_FPU_STACK_INC_TOP() \
11730 iemFpuStackIncTop(pVCpu)
11731/** Decrement the FPU stack pointer. */
11732#define IEM_MC_FPU_STACK_DEC_TOP() \
11733 iemFpuStackDecTop(pVCpu)
11734
11735/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11736#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11737 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11738/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11739#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11740 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11741/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11742#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11743 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11744/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11745#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11746 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11747/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11748 * stack. */
11749#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11750 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11751/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11752#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11753 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11754
11755/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11756#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11757 iemFpuStackUnderflow(pVCpu, a_iStDst)
11758/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11759 * stack. */
11760#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11761 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11762/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11763 * FPUDS. */
11764#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11765 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11766/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11767 * FPUDS. Pops stack. */
11768#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11769 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11770/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11771 * stack twice. */
11772#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11773 iemFpuStackUnderflowThenPopPop(pVCpu)
11774/** Raises a FPU stack underflow exception for an instruction pushing a result
11775 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11776#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11777 iemFpuStackPushUnderflow(pVCpu)
11778/** Raises a FPU stack underflow exception for an instruction pushing a result
11779 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11780#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11781 iemFpuStackPushUnderflowTwo(pVCpu)
11782
11783/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11784 * FPUIP, FPUCS and FOP. */
11785#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11786 iemFpuStackPushOverflow(pVCpu)
11787/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11788 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11789#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11790 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11791/** Prepares for using the FPU state.
11792 * Ensures that we can use the host FPU in the current context (RC+R0).
11793 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11794#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11795/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
11796#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11797/** Actualizes the guest FPU state so it can be accessed and modified. */
11798#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
11799
11800/** Prepares for using the SSE state.
11801 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11802 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11803#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11804/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
11805#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11806/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
11807#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
11808
11809/**
11810 * Calls a MMX assembly implementation taking two visible arguments.
11811 *
11812 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11813 * @param a0 The first extra argument.
11814 * @param a1 The second extra argument.
11815 */
11816#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11817 do { \
11818 IEM_MC_PREPARE_FPU_USAGE(); \
11819 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11820 } while (0)
11821
11822/**
11823 * Calls a MMX assembly implementation taking three visible arguments.
11824 *
11825 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11826 * @param a0 The first extra argument.
11827 * @param a1 The second extra argument.
11828 * @param a2 The third extra argument.
11829 */
11830#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11831 do { \
11832 IEM_MC_PREPARE_FPU_USAGE(); \
11833 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11834 } while (0)
11835
11836
11837/**
11838 * Calls a SSE assembly implementation taking two visible arguments.
11839 *
11840 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11841 * @param a0 The first extra argument.
11842 * @param a1 The second extra argument.
11843 */
11844#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11845 do { \
11846 IEM_MC_PREPARE_SSE_USAGE(); \
11847 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11848 } while (0)
11849
11850/**
11851 * Calls a SSE assembly implementation taking three visible arguments.
11852 *
11853 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11854 * @param a0 The first extra argument.
11855 * @param a1 The second extra argument.
11856 * @param a2 The third extra argument.
11857 */
11858#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11859 do { \
11860 IEM_MC_PREPARE_SSE_USAGE(); \
11861 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11862 } while (0)
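
/*
 * Illustrative sketch (not compiled): a register-form two-operand SSE2
 * instruction (pxor xmm,xmm style) using the SSE worker-call macro above.
 * The register indices are placeholders and iemAImpl_pxor_u128 merely follows
 * the iemAImpl_* naming pattern of the assembly helpers.
 */
#if 0
IEM_MC_BEGIN(2, 0);
IEM_MC_ARG(PRTUINT128U,  pDst, 0);
IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
IEM_MC_PREPARE_SSE_USAGE();
IEM_MC_REF_XREG_U128(pDst, 0 /*iXRegDst, placeholder*/);
IEM_MC_REF_XREG_U128_CONST(pSrc, 1 /*iXRegSrc, placeholder*/);
IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pxor_u128, pDst, pSrc);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif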
11863
11864/** @note Not for IOPL or IF testing. */
11865#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11866/** @note Not for IOPL or IF testing. */
11867#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11868/** @note Not for IOPL or IF testing. */
11869#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11870/** @note Not for IOPL or IF testing. */
11871#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11872/** @note Not for IOPL or IF testing. */
11873#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11874 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11875 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11876/** @note Not for IOPL or IF testing. */
11877#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11878 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11879 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11880/** @note Not for IOPL or IF testing. */
11881#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11882 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11883 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11884 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11885/** @note Not for IOPL or IF testing. */
11886#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11887 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11888 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11889 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11890#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11891#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11892#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11893/** @note Not for IOPL or IF testing. */
11894#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11895 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11896 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11897/** @note Not for IOPL or IF testing. */
11898#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11899 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11900 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11901/** @note Not for IOPL or IF testing. */
11902#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11903 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11904 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11905/** @note Not for IOPL or IF testing. */
11906#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11907 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11908 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11909/** @note Not for IOPL or IF testing. */
11910#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11911 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11912 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11913/** @note Not for IOPL or IF testing. */
11914#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11915 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11916 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11917#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11918#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11919
11920#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11921 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11922#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11923 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11924#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11925 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11926#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11927 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11928#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11929 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11930#define IEM_MC_IF_FCW_IM() \
11931 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11932
11933#define IEM_MC_ELSE() } else {
11934#define IEM_MC_ENDIF() } do {} while (0)
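
/* Illustrative sketch (assumed usage): the IEM_MC_IF_XXX / IEM_MC_ELSE /
 * IEM_MC_ENDIF macros above merely open and close C blocks, so a conditional
 * microcode sequence reads like structured code.  A Jcc-style opcode body
 * could use them as sketched below, assuming the IEM_MC_BEGIN/IEM_MC_END,
 * IEM_MC_REL_JMP_S8 and IEM_MC_ADVANCE_RIP macros defined earlier in this
 * file and an i8Imm immediate fetched by the decoder:
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      IEM_MC_ELSE()
 *          IEM_MC_ADVANCE_RIP();
 *      IEM_MC_ENDIF();
 *      IEM_MC_END();
 */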
11935
11936/** @} */
11937
11938
11939/** @name Opcode Debug Helpers.
11940 * @{
11941 */
11942#ifdef VBOX_WITH_STATISTICS
11943# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
11944#else
11945# define IEMOP_INC_STATS(a_Stats) do { } while (0)
11946#endif
11947
11948#ifdef DEBUG
11949# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
11950 do { \
11951 IEMOP_INC_STATS(a_Stats); \
11952 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11953 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
11954 } while (0)
11955
11956# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
11957 do { \
11958 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11959 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11960 (void)RT_CONCAT(OP_,a_Upper); \
11961 (void)(a_fDisHints); \
11962 (void)(a_fIemHints); \
11963 } while (0)
11964
11965# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
11966 do { \
11967 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11968 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11969 (void)RT_CONCAT(OP_,a_Upper); \
11970 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11971 (void)(a_fDisHints); \
11972 (void)(a_fIemHints); \
11973 } while (0)
11974
11975# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
11976 do { \
11977 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11978 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11979 (void)RT_CONCAT(OP_,a_Upper); \
11980 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11981 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11982 (void)(a_fDisHints); \
11983 (void)(a_fIemHints); \
11984 } while (0)
11985
11986# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
11987 do { \
11988 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11989 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11990 (void)RT_CONCAT(OP_,a_Upper); \
11991 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11992 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11993 (void)RT_CONCAT(OP_PARM_,a_Op3); \
11994 (void)(a_fDisHints); \
11995 (void)(a_fIemHints); \
11996 } while (0)
11997
11998# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
11999 do { \
12000 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12001 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12002 (void)RT_CONCAT(OP_,a_Upper); \
12003 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12004 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12005 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12006 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12007 (void)(a_fDisHints); \
12008 (void)(a_fIemHints); \
12009 } while (0)
12010
12011#else
12012# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12013
12014# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12015 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12016# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12017 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12018# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12019 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12020# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12021 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12022# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12023 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12024
12025#endif
12026
12027#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12028 IEMOP_MNEMONIC0EX(a_Lower, \
12029 #a_Lower, \
12030 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12031#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12032 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12033 #a_Lower " " #a_Op1, \
12034 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12035#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12036 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12037 #a_Lower " " #a_Op1 "," #a_Op2, \
12038 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12039#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12040 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12041 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12042 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12043#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12044 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12045 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12046 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
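
/* Illustrative sketch (hypothetical invocation): something like
 *
 *      IEMOP_MNEMONIC2(RM, ADD, add, Gb, Eb, 0, 0);
 *
 * bumps the add_Gb_Eb statistics member and, in DEBUG builds, Log4()s
 * "add Gb,Eb" together with CS:RIP via IEMOP_MNEMONIC2EX/IEMOP_MNEMONIC above.
 * The form, opcode and operand arguments are only (void) referenced so that
 * misspelled IEMOPFORM_/OP_/OP_PARM_ names fail to compile; the zero hint
 * arguments are placeholders for this sketch.
 */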
12047
12048/** @} */
12049
12050
12051/** @name Opcode Helpers.
12052 * @{
12053 */
12054
12055#ifdef IN_RING3
12056# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12057 do { \
12058 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12059 else \
12060 { \
12061 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12062 return IEMOP_RAISE_INVALID_OPCODE(); \
12063 } \
12064 } while (0)
12065#else
12066# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12067 do { \
12068 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12069 else return IEMOP_RAISE_INVALID_OPCODE(); \
12070 } while (0)
12071#endif
12072
12073/** The instruction requires a 186 or later. */
12074#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12075# define IEMOP_HLP_MIN_186() do { } while (0)
12076#else
12077# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12078#endif
12079
12080/** The instruction requires a 286 or later. */
12081#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12082# define IEMOP_HLP_MIN_286() do { } while (0)
12083#else
12084# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12085#endif
12086
12087/** The instruction requires a 386 or later. */
12088#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12089# define IEMOP_HLP_MIN_386() do { } while (0)
12090#else
12091# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12092#endif
12093
12094/** The instruction requires a 386 or later if the given expression is true. */
12095#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12096# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12097#else
12098# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12099#endif
12100
12101/** The instruction requires a 486 or later. */
12102#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12103# define IEMOP_HLP_MIN_486() do { } while (0)
12104#else
12105# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12106#endif
12107
12108/** The instruction requires a Pentium (586) or later. */
12109#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12110# define IEMOP_HLP_MIN_586() do { } while (0)
12111#else
12112# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12113#endif
12114
12115/** The instruction requires a PentiumPro (686) or later. */
12116#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12117# define IEMOP_HLP_MIN_686() do { } while (0)
12118#else
12119# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12120#endif
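
/* Illustrative sketch (assumed usage): the decoder function for an instruction
 * introduced with the 486 (cmpxchg, for instance) would typically start with
 *
 *      IEMOP_HLP_MIN_486();
 *
 * which compiles to nothing when the configured target CPU is guaranteed to be
 * a 486 or later, and otherwise expands to the IEMOP_HLP_MIN_CPU() check above
 * that raises \#UD on older target CPUs.
 */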
12121
12122
12123/** The instruction raises an \#UD in real and V8086 mode. */
12124#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12125 do \
12126 { \
12127 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12128 else return IEMOP_RAISE_INVALID_OPCODE(); \
12129 } while (0)
12130
12131/** The instruction is not available in 64-bit mode; raises \#UD if we're in
12132 * 64-bit mode. */
12133#define IEMOP_HLP_NO_64BIT() \
12134 do \
12135 { \
12136 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12137 return IEMOP_RAISE_INVALID_OPCODE(); \
12138 } while (0)
12139
12140/** The instruction is only available in 64-bit mode; raises \#UD if we're not in
12141 * 64-bit mode. */
12142#define IEMOP_HLP_ONLY_64BIT() \
12143 do \
12144 { \
12145 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12146 return IEMOP_RAISE_INVALID_OPCODE(); \
12147 } while (0)
12148
12149/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12150#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12151 do \
12152 { \
12153 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12154 iemRecalEffOpSize64Default(pVCpu); \
12155 } while (0)
12156
12157/** The instruction has 64-bit operand size if 64-bit mode. */
12158#define IEMOP_HLP_64BIT_OP_SIZE() \
12159 do \
12160 { \
12161 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12162 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12163 } while (0)
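
/* Illustrative sketch (hypothetical placement): near branches and stack
 * instructions default to 64-bit operand size in long mode, so their decoders
 * would invoke one of the two helpers above before building the microcode:
 *
 *      IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();   // push/pop, near jmp/call, ret...
 *
 * The concrete instruction bodies live in IEMAllInstructions.cpp.h.
 */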
12164
12165/** Only a REX prefix immediately preceding the first opcode byte takes
12166 * effect. This macro helps ensure that and logs bad guest code. */
12167#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12168 do \
12169 { \
12170 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12171 { \
12172 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12173 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12174 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12175 pVCpu->iem.s.uRexB = 0; \
12176 pVCpu->iem.s.uRexIndex = 0; \
12177 pVCpu->iem.s.uRexReg = 0; \
12178 iemRecalEffOpSize(pVCpu); \
12179 } \
12180 } while (0)
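
/* Illustrative sketch (hypothetical caller): legacy prefix decoders are the
 * typical users, since finding e.g. a segment override after a REX byte means
 * that REX prefix is dead:
 *
 *      IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg cs");
 *
 * The string argument only feeds the Log5() statement above.
 */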
12181
12182/**
12183 * Done decoding.
12184 */
12185#define IEMOP_HLP_DONE_DECODING() \
12186 do \
12187 { \
12188 /*nothing for now, maybe later... */ \
12189 } while (0)
12190
12191/**
12192 * Done decoding, raise \#UD exception if lock prefix present.
12193 */
12194#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12195 do \
12196 { \
12197 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12198 { /* likely */ } \
12199 else \
12200 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12201 } while (0)
12202
12203#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12204 do \
12205 { \
12206 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12207 { /* likely */ } \
12208 else \
12209 { \
12210 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12211 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12212 } \
12213 } while (0)
12214#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12215 do \
12216 { \
12217 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12218 { /* likely */ } \
12219 else \
12220 { \
12221 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12222 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12223 } \
12224 } while (0)
12225
12226/**
12227 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12228 * are present.
12229 */
12230#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12231 do \
12232 { \
12233 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12234 { /* likely */ } \
12235 else \
12236 return IEMOP_RAISE_INVALID_OPCODE(); \
12237 } while (0)
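
/* Illustrative sketch (hypothetical shape; the real bodies are generated in
 * IEMAllInstructions.cpp.h): the done-decoding helpers above are called once
 * all opcode/ModRM/immediate bytes have been fetched, right before the IEM_MC
 * block, so that a stray LOCK (or REP) prefix raises an exception instead of
 * being silently executed:
 *
 *      uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *      if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          // ... IEM_MC_BEGIN / register microcode / IEM_MC_END ...
 *      }
 */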
12238
12239
12240/**
12241 * Done decoding VEX.
12242 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, or if
12243 * we're in real or v8086 mode.
12244 */
12245#define IEMOP_HLP_DONE_VEX_DECODING() \
12246 do \
12247 { \
12248 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12249 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12250 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12251 { /* likely */ } \
12252 else \
12253 return IEMOP_RAISE_INVALID_OPCODE(); \
12254 } while (0)
12255
12256/**
12257 * Done decoding VEX, no V, no L.
12258 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12259 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12260 */
12261#define IEMOP_HLP_DONE_VEX_DECODING_L_ZERO_NO_VVV() \
12262 do \
12263 { \
12264 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12265 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12266 && pVCpu->iem.s.uVexLength == 0 \
12267 && pVCpu->iem.s.uVex3rdReg == 0 \
12268 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12269 { /* likely */ } \
12270 else \
12271 return IEMOP_RAISE_INVALID_OPCODE(); \
12272 } while (0)
12273
12274#ifdef VBOX_WITH_NESTED_HWVIRT
12275/** Checks and handles SVM nested-guest control & instruction intercepts. */
12276# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12277 do \
12278 { \
12279 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12280 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12281 } while (0)
12282
12283/** Checks and handles SVM nested-guest control register read intercepts. */
12284# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12285 do \
12286 { \
12287 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12288 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12289 } while (0)
12290
12291#else
12292# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12293# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12294
12295#endif /* VBOX_WITH_NESTED_HWVIRT */
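
/* Illustrative sketch (intercept/exit-code names assumed from the SVM headers):
 * instruction emulation can guard itself with these helpers before touching
 * state the nested guest's hypervisor may have intercepted, e.g.
 *
 *      IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSC, SVM_EXIT_RDTSC, 0, 0);
 *
 * Without VBOX_WITH_NESTED_HWVIRT both macros compile to nothing, as above.
 */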
12296
12297
12298/**
12299 * Calculates the effective address of a ModR/M memory operand.
12300 *
12301 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12302 *
12303 * @return Strict VBox status code.
12304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12305 * @param bRm The ModRM byte.
12306 * @param cbImm The size of any immediate following the
12307 * effective address opcode bytes. Important for
12308 * RIP relative addressing.
12309 * @param pGCPtrEff Where to return the effective address.
12310 */
12311IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12312{
12313 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12314 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12315# define SET_SS_DEF() \
12316 do \
12317 { \
12318 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12319 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12320 } while (0)
12321
12322 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12323 {
12324/** @todo Check the effective address size crap! */
12325 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12326 {
12327 uint16_t u16EffAddr;
12328
12329 /* Handle the disp16 form with no registers first. */
12330 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12331 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12332 else
12333 {
12334 /* Get the displacement. */
12335 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12336 {
12337 case 0: u16EffAddr = 0; break;
12338 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12339 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12340 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12341 }
12342
12343 /* Add the base and index registers to the disp. */
12344 switch (bRm & X86_MODRM_RM_MASK)
12345 {
12346 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12347 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12348 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12349 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12350 case 4: u16EffAddr += pCtx->si; break;
12351 case 5: u16EffAddr += pCtx->di; break;
12352 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12353 case 7: u16EffAddr += pCtx->bx; break;
12354 }
12355 }
12356
12357 *pGCPtrEff = u16EffAddr;
12358 }
12359 else
12360 {
12361 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12362 uint32_t u32EffAddr;
12363
12364 /* Handle the disp32 form with no registers first. */
12365 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12366 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12367 else
12368 {
12369 /* Get the register (or SIB) value. */
12370 switch ((bRm & X86_MODRM_RM_MASK))
12371 {
12372 case 0: u32EffAddr = pCtx->eax; break;
12373 case 1: u32EffAddr = pCtx->ecx; break;
12374 case 2: u32EffAddr = pCtx->edx; break;
12375 case 3: u32EffAddr = pCtx->ebx; break;
12376 case 4: /* SIB */
12377 {
12378 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12379
12380 /* Get the index and scale it. */
12381 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12382 {
12383 case 0: u32EffAddr = pCtx->eax; break;
12384 case 1: u32EffAddr = pCtx->ecx; break;
12385 case 2: u32EffAddr = pCtx->edx; break;
12386 case 3: u32EffAddr = pCtx->ebx; break;
12387 case 4: u32EffAddr = 0; /*none */ break;
12388 case 5: u32EffAddr = pCtx->ebp; break;
12389 case 6: u32EffAddr = pCtx->esi; break;
12390 case 7: u32EffAddr = pCtx->edi; break;
12391 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12392 }
12393 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12394
12395 /* add base */
12396 switch (bSib & X86_SIB_BASE_MASK)
12397 {
12398 case 0: u32EffAddr += pCtx->eax; break;
12399 case 1: u32EffAddr += pCtx->ecx; break;
12400 case 2: u32EffAddr += pCtx->edx; break;
12401 case 3: u32EffAddr += pCtx->ebx; break;
12402 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12403 case 5:
12404 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12405 {
12406 u32EffAddr += pCtx->ebp;
12407 SET_SS_DEF();
12408 }
12409 else
12410 {
12411 uint32_t u32Disp;
12412 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12413 u32EffAddr += u32Disp;
12414 }
12415 break;
12416 case 6: u32EffAddr += pCtx->esi; break;
12417 case 7: u32EffAddr += pCtx->edi; break;
12418 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12419 }
12420 break;
12421 }
12422 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12423 case 6: u32EffAddr = pCtx->esi; break;
12424 case 7: u32EffAddr = pCtx->edi; break;
12425 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12426 }
12427
12428 /* Get and add the displacement. */
12429 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12430 {
12431 case 0:
12432 break;
12433 case 1:
12434 {
12435 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12436 u32EffAddr += i8Disp;
12437 break;
12438 }
12439 case 2:
12440 {
12441 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12442 u32EffAddr += u32Disp;
12443 break;
12444 }
12445 default:
12446 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12447 }
12448
12449 }
12450 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12451 *pGCPtrEff = u32EffAddr;
12452 else
12453 {
12454 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12455 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12456 }
12457 }
12458 }
12459 else
12460 {
12461 uint64_t u64EffAddr;
12462
12463 /* Handle the rip+disp32 form with no registers first. */
12464 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12465 {
12466 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12467 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12468 }
12469 else
12470 {
12471 /* Get the register (or SIB) value. */
12472 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12473 {
12474 case 0: u64EffAddr = pCtx->rax; break;
12475 case 1: u64EffAddr = pCtx->rcx; break;
12476 case 2: u64EffAddr = pCtx->rdx; break;
12477 case 3: u64EffAddr = pCtx->rbx; break;
12478 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12479 case 6: u64EffAddr = pCtx->rsi; break;
12480 case 7: u64EffAddr = pCtx->rdi; break;
12481 case 8: u64EffAddr = pCtx->r8; break;
12482 case 9: u64EffAddr = pCtx->r9; break;
12483 case 10: u64EffAddr = pCtx->r10; break;
12484 case 11: u64EffAddr = pCtx->r11; break;
12485 case 13: u64EffAddr = pCtx->r13; break;
12486 case 14: u64EffAddr = pCtx->r14; break;
12487 case 15: u64EffAddr = pCtx->r15; break;
12488 /* SIB */
12489 case 4:
12490 case 12:
12491 {
12492 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12493
12494 /* Get the index and scale it. */
12495 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12496 {
12497 case 0: u64EffAddr = pCtx->rax; break;
12498 case 1: u64EffAddr = pCtx->rcx; break;
12499 case 2: u64EffAddr = pCtx->rdx; break;
12500 case 3: u64EffAddr = pCtx->rbx; break;
12501 case 4: u64EffAddr = 0; /*none */ break;
12502 case 5: u64EffAddr = pCtx->rbp; break;
12503 case 6: u64EffAddr = pCtx->rsi; break;
12504 case 7: u64EffAddr = pCtx->rdi; break;
12505 case 8: u64EffAddr = pCtx->r8; break;
12506 case 9: u64EffAddr = pCtx->r9; break;
12507 case 10: u64EffAddr = pCtx->r10; break;
12508 case 11: u64EffAddr = pCtx->r11; break;
12509 case 12: u64EffAddr = pCtx->r12; break;
12510 case 13: u64EffAddr = pCtx->r13; break;
12511 case 14: u64EffAddr = pCtx->r14; break;
12512 case 15: u64EffAddr = pCtx->r15; break;
12513 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12514 }
12515 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12516
12517 /* add base */
12518 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12519 {
12520 case 0: u64EffAddr += pCtx->rax; break;
12521 case 1: u64EffAddr += pCtx->rcx; break;
12522 case 2: u64EffAddr += pCtx->rdx; break;
12523 case 3: u64EffAddr += pCtx->rbx; break;
12524 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12525 case 6: u64EffAddr += pCtx->rsi; break;
12526 case 7: u64EffAddr += pCtx->rdi; break;
12527 case 8: u64EffAddr += pCtx->r8; break;
12528 case 9: u64EffAddr += pCtx->r9; break;
12529 case 10: u64EffAddr += pCtx->r10; break;
12530 case 11: u64EffAddr += pCtx->r11; break;
12531 case 12: u64EffAddr += pCtx->r12; break;
12532 case 14: u64EffAddr += pCtx->r14; break;
12533 case 15: u64EffAddr += pCtx->r15; break;
12534 /* complicated encodings */
12535 case 5:
12536 case 13:
12537 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12538 {
12539 if (!pVCpu->iem.s.uRexB)
12540 {
12541 u64EffAddr += pCtx->rbp;
12542 SET_SS_DEF();
12543 }
12544 else
12545 u64EffAddr += pCtx->r13;
12546 }
12547 else
12548 {
12549 uint32_t u32Disp;
12550 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12551 u64EffAddr += (int32_t)u32Disp;
12552 }
12553 break;
12554 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12555 }
12556 break;
12557 }
12558 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12559 }
12560
12561 /* Get and add the displacement. */
12562 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12563 {
12564 case 0:
12565 break;
12566 case 1:
12567 {
12568 int8_t i8Disp;
12569 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12570 u64EffAddr += i8Disp;
12571 break;
12572 }
12573 case 2:
12574 {
12575 uint32_t u32Disp;
12576 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12577 u64EffAddr += (int32_t)u32Disp;
12578 break;
12579 }
12580 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12581 }
12582
12583 }
12584
12585 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12586 *pGCPtrEff = u64EffAddr;
12587 else
12588 {
12589 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12590 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12591 }
12592 }
12593
12594 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12595 return VINF_SUCCESS;
12596}
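
/* Worked example (illustrative only) for the routine above: in 32-bit code,
 * bRm=0x44 followed by a SIB byte of 0x88 and a disp8 of 0x10 decodes as
 * mod=1, rm=4 (SIB), SIB.scale=2, SIB.index=1 (ecx), SIB.base=0 (eax), so the
 * function returns eax + ecx*4 + 0x10 with DS as the default segment.  In
 * 16-bit code, bRm=0x46 with a disp8 decodes to bp+disp8 and SET_SS_DEF()
 * switches the default segment to SS. */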
12597
12598
12599/**
12600 * Calculates the effective address of a ModR/M memory operand.
12601 *
12602 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12603 *
12604 * @return Strict VBox status code.
12605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12606 * @param bRm The ModRM byte.
12607 * @param cbImm The size of any immediate following the
12608 * effective address opcode bytes. Important for
12609 * RIP relative addressing.
12610 * @param pGCPtrEff Where to return the effective address.
12611 * @param offRsp RSP displacement.
12612 */
12613IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
12614{
12615 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
12616 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12617# define SET_SS_DEF() \
12618 do \
12619 { \
12620 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12621 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12622 } while (0)
12623
12624 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12625 {
12626/** @todo Check the effective address size crap! */
12627 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12628 {
12629 uint16_t u16EffAddr;
12630
12631 /* Handle the disp16 form with no registers first. */
12632 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12633 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12634 else
12635 {
12636 /* Get the displacement. */
12637 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12638 {
12639 case 0: u16EffAddr = 0; break;
12640 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12641 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12642 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12643 }
12644
12645 /* Add the base and index registers to the disp. */
12646 switch (bRm & X86_MODRM_RM_MASK)
12647 {
12648 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12649 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12650 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12651 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12652 case 4: u16EffAddr += pCtx->si; break;
12653 case 5: u16EffAddr += pCtx->di; break;
12654 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12655 case 7: u16EffAddr += pCtx->bx; break;
12656 }
12657 }
12658
12659 *pGCPtrEff = u16EffAddr;
12660 }
12661 else
12662 {
12663 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12664 uint32_t u32EffAddr;
12665
12666 /* Handle the disp32 form with no registers first. */
12667 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12668 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12669 else
12670 {
12671 /* Get the register (or SIB) value. */
12672 switch ((bRm & X86_MODRM_RM_MASK))
12673 {
12674 case 0: u32EffAddr = pCtx->eax; break;
12675 case 1: u32EffAddr = pCtx->ecx; break;
12676 case 2: u32EffAddr = pCtx->edx; break;
12677 case 3: u32EffAddr = pCtx->ebx; break;
12678 case 4: /* SIB */
12679 {
12680 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12681
12682 /* Get the index and scale it. */
12683 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12684 {
12685 case 0: u32EffAddr = pCtx->eax; break;
12686 case 1: u32EffAddr = pCtx->ecx; break;
12687 case 2: u32EffAddr = pCtx->edx; break;
12688 case 3: u32EffAddr = pCtx->ebx; break;
12689 case 4: u32EffAddr = 0; /*none */ break;
12690 case 5: u32EffAddr = pCtx->ebp; break;
12691 case 6: u32EffAddr = pCtx->esi; break;
12692 case 7: u32EffAddr = pCtx->edi; break;
12693 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12694 }
12695 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12696
12697 /* add base */
12698 switch (bSib & X86_SIB_BASE_MASK)
12699 {
12700 case 0: u32EffAddr += pCtx->eax; break;
12701 case 1: u32EffAddr += pCtx->ecx; break;
12702 case 2: u32EffAddr += pCtx->edx; break;
12703 case 3: u32EffAddr += pCtx->ebx; break;
12704 case 4:
12705 u32EffAddr += pCtx->esp + offRsp;
12706 SET_SS_DEF();
12707 break;
12708 case 5:
12709 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12710 {
12711 u32EffAddr += pCtx->ebp;
12712 SET_SS_DEF();
12713 }
12714 else
12715 {
12716 uint32_t u32Disp;
12717 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12718 u32EffAddr += u32Disp;
12719 }
12720 break;
12721 case 6: u32EffAddr += pCtx->esi; break;
12722 case 7: u32EffAddr += pCtx->edi; break;
12723 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12724 }
12725 break;
12726 }
12727 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12728 case 6: u32EffAddr = pCtx->esi; break;
12729 case 7: u32EffAddr = pCtx->edi; break;
12730 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12731 }
12732
12733 /* Get and add the displacement. */
12734 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12735 {
12736 case 0:
12737 break;
12738 case 1:
12739 {
12740 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12741 u32EffAddr += i8Disp;
12742 break;
12743 }
12744 case 2:
12745 {
12746 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12747 u32EffAddr += u32Disp;
12748 break;
12749 }
12750 default:
12751 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12752 }
12753
12754 }
12755 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12756 *pGCPtrEff = u32EffAddr;
12757 else
12758 {
12759 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12760 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12761 }
12762 }
12763 }
12764 else
12765 {
12766 uint64_t u64EffAddr;
12767
12768 /* Handle the rip+disp32 form with no registers first. */
12769 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12770 {
12771 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12772 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12773 }
12774 else
12775 {
12776 /* Get the register (or SIB) value. */
12777 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12778 {
12779 case 0: u64EffAddr = pCtx->rax; break;
12780 case 1: u64EffAddr = pCtx->rcx; break;
12781 case 2: u64EffAddr = pCtx->rdx; break;
12782 case 3: u64EffAddr = pCtx->rbx; break;
12783 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12784 case 6: u64EffAddr = pCtx->rsi; break;
12785 case 7: u64EffAddr = pCtx->rdi; break;
12786 case 8: u64EffAddr = pCtx->r8; break;
12787 case 9: u64EffAddr = pCtx->r9; break;
12788 case 10: u64EffAddr = pCtx->r10; break;
12789 case 11: u64EffAddr = pCtx->r11; break;
12790 case 13: u64EffAddr = pCtx->r13; break;
12791 case 14: u64EffAddr = pCtx->r14; break;
12792 case 15: u64EffAddr = pCtx->r15; break;
12793 /* SIB */
12794 case 4:
12795 case 12:
12796 {
12797 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12798
12799 /* Get the index and scale it. */
12800 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12801 {
12802 case 0: u64EffAddr = pCtx->rax; break;
12803 case 1: u64EffAddr = pCtx->rcx; break;
12804 case 2: u64EffAddr = pCtx->rdx; break;
12805 case 3: u64EffAddr = pCtx->rbx; break;
12806 case 4: u64EffAddr = 0; /*none */ break;
12807 case 5: u64EffAddr = pCtx->rbp; break;
12808 case 6: u64EffAddr = pCtx->rsi; break;
12809 case 7: u64EffAddr = pCtx->rdi; break;
12810 case 8: u64EffAddr = pCtx->r8; break;
12811 case 9: u64EffAddr = pCtx->r9; break;
12812 case 10: u64EffAddr = pCtx->r10; break;
12813 case 11: u64EffAddr = pCtx->r11; break;
12814 case 12: u64EffAddr = pCtx->r12; break;
12815 case 13: u64EffAddr = pCtx->r13; break;
12816 case 14: u64EffAddr = pCtx->r14; break;
12817 case 15: u64EffAddr = pCtx->r15; break;
12818 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12819 }
12820 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12821
12822 /* add base */
12823 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12824 {
12825 case 0: u64EffAddr += pCtx->rax; break;
12826 case 1: u64EffAddr += pCtx->rcx; break;
12827 case 2: u64EffAddr += pCtx->rdx; break;
12828 case 3: u64EffAddr += pCtx->rbx; break;
12829 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
12830 case 6: u64EffAddr += pCtx->rsi; break;
12831 case 7: u64EffAddr += pCtx->rdi; break;
12832 case 8: u64EffAddr += pCtx->r8; break;
12833 case 9: u64EffAddr += pCtx->r9; break;
12834 case 10: u64EffAddr += pCtx->r10; break;
12835 case 11: u64EffAddr += pCtx->r11; break;
12836 case 12: u64EffAddr += pCtx->r12; break;
12837 case 14: u64EffAddr += pCtx->r14; break;
12838 case 15: u64EffAddr += pCtx->r15; break;
12839 /* complicated encodings */
12840 case 5:
12841 case 13:
12842 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12843 {
12844 if (!pVCpu->iem.s.uRexB)
12845 {
12846 u64EffAddr += pCtx->rbp;
12847 SET_SS_DEF();
12848 }
12849 else
12850 u64EffAddr += pCtx->r13;
12851 }
12852 else
12853 {
12854 uint32_t u32Disp;
12855 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12856 u64EffAddr += (int32_t)u32Disp;
12857 }
12858 break;
12859 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12860 }
12861 break;
12862 }
12863 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12864 }
12865
12866 /* Get and add the displacement. */
12867 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12868 {
12869 case 0:
12870 break;
12871 case 1:
12872 {
12873 int8_t i8Disp;
12874 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12875 u64EffAddr += i8Disp;
12876 break;
12877 }
12878 case 2:
12879 {
12880 uint32_t u32Disp;
12881 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12882 u64EffAddr += (int32_t)u32Disp;
12883 break;
12884 }
12885 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12886 }
12887
12888 }
12889
12890 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12891 *pGCPtrEff = u64EffAddr;
12892 else
12893 {
12894 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12895 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12896 }
12897 }
12898
12899 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
12900 return VINF_SUCCESS;
12901}
12902
12903
12904#ifdef IEM_WITH_SETJMP
12905/**
12906 * Calculates the effective address of a ModR/M memory operand.
12907 *
12908 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12909 *
12910 * May longjmp on internal error.
12911 *
12912 * @return The effective address.
12913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12914 * @param bRm The ModRM byte.
12915 * @param cbImm The size of any immediate following the
12916 * effective address opcode bytes. Important for
12917 * RIP relative addressing.
12918 */
12919IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12920{
12921 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12922 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12923# define SET_SS_DEF() \
12924 do \
12925 { \
12926 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12927 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12928 } while (0)
12929
12930 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12931 {
12932/** @todo Check the effective address size crap! */
12933 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12934 {
12935 uint16_t u16EffAddr;
12936
12937 /* Handle the disp16 form with no registers first. */
12938 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12939 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12940 else
12941 {
12942 /* Get the displacement. */
12943 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12944 {
12945 case 0: u16EffAddr = 0; break;
12946 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12947 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12948 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12949 }
12950
12951 /* Add the base and index registers to the disp. */
12952 switch (bRm & X86_MODRM_RM_MASK)
12953 {
12954 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12955 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12956 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12957 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12958 case 4: u16EffAddr += pCtx->si; break;
12959 case 5: u16EffAddr += pCtx->di; break;
12960 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12961 case 7: u16EffAddr += pCtx->bx; break;
12962 }
12963 }
12964
12965 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12966 return u16EffAddr;
12967 }
12968
12969 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12970 uint32_t u32EffAddr;
12971
12972 /* Handle the disp32 form with no registers first. */
12973 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12974 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12975 else
12976 {
12977 /* Get the register (or SIB) value. */
12978 switch ((bRm & X86_MODRM_RM_MASK))
12979 {
12980 case 0: u32EffAddr = pCtx->eax; break;
12981 case 1: u32EffAddr = pCtx->ecx; break;
12982 case 2: u32EffAddr = pCtx->edx; break;
12983 case 3: u32EffAddr = pCtx->ebx; break;
12984 case 4: /* SIB */
12985 {
12986 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12987
12988 /* Get the index and scale it. */
12989 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12990 {
12991 case 0: u32EffAddr = pCtx->eax; break;
12992 case 1: u32EffAddr = pCtx->ecx; break;
12993 case 2: u32EffAddr = pCtx->edx; break;
12994 case 3: u32EffAddr = pCtx->ebx; break;
12995 case 4: u32EffAddr = 0; /*none */ break;
12996 case 5: u32EffAddr = pCtx->ebp; break;
12997 case 6: u32EffAddr = pCtx->esi; break;
12998 case 7: u32EffAddr = pCtx->edi; break;
12999 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13000 }
13001 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13002
13003 /* add base */
13004 switch (bSib & X86_SIB_BASE_MASK)
13005 {
13006 case 0: u32EffAddr += pCtx->eax; break;
13007 case 1: u32EffAddr += pCtx->ecx; break;
13008 case 2: u32EffAddr += pCtx->edx; break;
13009 case 3: u32EffAddr += pCtx->ebx; break;
13010 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13011 case 5:
13012 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13013 {
13014 u32EffAddr += pCtx->ebp;
13015 SET_SS_DEF();
13016 }
13017 else
13018 {
13019 uint32_t u32Disp;
13020 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13021 u32EffAddr += u32Disp;
13022 }
13023 break;
13024 case 6: u32EffAddr += pCtx->esi; break;
13025 case 7: u32EffAddr += pCtx->edi; break;
13026 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13027 }
13028 break;
13029 }
13030 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13031 case 6: u32EffAddr = pCtx->esi; break;
13032 case 7: u32EffAddr = pCtx->edi; break;
13033 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13034 }
13035
13036 /* Get and add the displacement. */
13037 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13038 {
13039 case 0:
13040 break;
13041 case 1:
13042 {
13043 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13044 u32EffAddr += i8Disp;
13045 break;
13046 }
13047 case 2:
13048 {
13049 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13050 u32EffAddr += u32Disp;
13051 break;
13052 }
13053 default:
13054 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13055 }
13056 }
13057
13058 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13059 {
13060 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13061 return u32EffAddr;
13062 }
13063 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13064 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13065 return u32EffAddr & UINT16_MAX;
13066 }
13067
13068 uint64_t u64EffAddr;
13069
13070 /* Handle the rip+disp32 form with no registers first. */
13071 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13072 {
13073 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13074 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13075 }
13076 else
13077 {
13078 /* Get the register (or SIB) value. */
13079 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13080 {
13081 case 0: u64EffAddr = pCtx->rax; break;
13082 case 1: u64EffAddr = pCtx->rcx; break;
13083 case 2: u64EffAddr = pCtx->rdx; break;
13084 case 3: u64EffAddr = pCtx->rbx; break;
13085 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13086 case 6: u64EffAddr = pCtx->rsi; break;
13087 case 7: u64EffAddr = pCtx->rdi; break;
13088 case 8: u64EffAddr = pCtx->r8; break;
13089 case 9: u64EffAddr = pCtx->r9; break;
13090 case 10: u64EffAddr = pCtx->r10; break;
13091 case 11: u64EffAddr = pCtx->r11; break;
13092 case 13: u64EffAddr = pCtx->r13; break;
13093 case 14: u64EffAddr = pCtx->r14; break;
13094 case 15: u64EffAddr = pCtx->r15; break;
13095 /* SIB */
13096 case 4:
13097 case 12:
13098 {
13099 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13100
13101 /* Get the index and scale it. */
13102 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13103 {
13104 case 0: u64EffAddr = pCtx->rax; break;
13105 case 1: u64EffAddr = pCtx->rcx; break;
13106 case 2: u64EffAddr = pCtx->rdx; break;
13107 case 3: u64EffAddr = pCtx->rbx; break;
13108 case 4: u64EffAddr = 0; /*none */ break;
13109 case 5: u64EffAddr = pCtx->rbp; break;
13110 case 6: u64EffAddr = pCtx->rsi; break;
13111 case 7: u64EffAddr = pCtx->rdi; break;
13112 case 8: u64EffAddr = pCtx->r8; break;
13113 case 9: u64EffAddr = pCtx->r9; break;
13114 case 10: u64EffAddr = pCtx->r10; break;
13115 case 11: u64EffAddr = pCtx->r11; break;
13116 case 12: u64EffAddr = pCtx->r12; break;
13117 case 13: u64EffAddr = pCtx->r13; break;
13118 case 14: u64EffAddr = pCtx->r14; break;
13119 case 15: u64EffAddr = pCtx->r15; break;
13120 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13121 }
13122 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13123
13124 /* add base */
13125 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13126 {
13127 case 0: u64EffAddr += pCtx->rax; break;
13128 case 1: u64EffAddr += pCtx->rcx; break;
13129 case 2: u64EffAddr += pCtx->rdx; break;
13130 case 3: u64EffAddr += pCtx->rbx; break;
13131 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13132 case 6: u64EffAddr += pCtx->rsi; break;
13133 case 7: u64EffAddr += pCtx->rdi; break;
13134 case 8: u64EffAddr += pCtx->r8; break;
13135 case 9: u64EffAddr += pCtx->r9; break;
13136 case 10: u64EffAddr += pCtx->r10; break;
13137 case 11: u64EffAddr += pCtx->r11; break;
13138 case 12: u64EffAddr += pCtx->r12; break;
13139 case 14: u64EffAddr += pCtx->r14; break;
13140 case 15: u64EffAddr += pCtx->r15; break;
13141 /* complicated encodings */
13142 case 5:
13143 case 13:
13144 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13145 {
13146 if (!pVCpu->iem.s.uRexB)
13147 {
13148 u64EffAddr += pCtx->rbp;
13149 SET_SS_DEF();
13150 }
13151 else
13152 u64EffAddr += pCtx->r13;
13153 }
13154 else
13155 {
13156 uint32_t u32Disp;
13157 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13158 u64EffAddr += (int32_t)u32Disp;
13159 }
13160 break;
13161 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13162 }
13163 break;
13164 }
13165 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13166 }
13167
13168 /* Get and add the displacement. */
13169 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13170 {
13171 case 0:
13172 break;
13173 case 1:
13174 {
13175 int8_t i8Disp;
13176 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13177 u64EffAddr += i8Disp;
13178 break;
13179 }
13180 case 2:
13181 {
13182 uint32_t u32Disp;
13183 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13184 u64EffAddr += (int32_t)u32Disp;
13185 break;
13186 }
13187 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13188 }
13189
13190 }
13191
13192 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13193 {
13194 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13195 return u64EffAddr;
13196 }
13197 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13198 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13199 return u64EffAddr & UINT32_MAX;
13200}
13201#endif /* IEM_WITH_SETJMP */
13202
13203
13204/** @} */
13205
13206
13207
13208/*
13209 * Include the instructions
13210 */
13211#include "IEMAllInstructions.cpp.h"
13212
13213
13214
13215
13216#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13217
13218/**
13219 * Sets up execution verification mode.
13220 */
13221IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13222{
13224 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13225
13226 /*
13227 * Always note down the address of the current instruction.
13228 */
13229 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13230 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13231
13232 /*
13233 * Enable verification and/or logging.
13234 */
13235 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
13236 if ( fNewNoRem
13237 && ( 0
13238#if 0 /* auto enable on first paged protected mode interrupt */
13239 || ( pOrgCtx->eflags.Bits.u1IF
13240 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13241 && TRPMHasTrap(pVCpu)
13242 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13243#endif
13244#if 0
13245 || ( pOrgCtx->cs.Sel == 0x10
13246 && ( pOrgCtx->rip == 0x90119e3e
13247 || pOrgCtx->rip == 0x901d9810))
13248#endif
13249#if 0 /* Auto enable DSL - FPU stuff. */
13250 || ( pOrgCtx->cs.Sel == 0x10
13251 && (// pOrgCtx->rip == 0xc02ec07f
13252 //|| pOrgCtx->rip == 0xc02ec082
13253 //|| pOrgCtx->rip == 0xc02ec0c9
13254 0
13255 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13256#endif
13257#if 0 /* Auto enable DSL - fstp st0 stuff. */
13258 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13259#endif
13260#if 0
13261 || pOrgCtx->rip == 0x9022bb3a
13262#endif
13263#if 0
13264 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13265#endif
13266#if 0
13267 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13268 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13269#endif
13270#if 0 /* NT4SP1 - later on the blue screen, things go wrong... */
13271 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13272 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13273 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13274#endif
13275#if 0 /* NT4SP1 - xadd early boot. */
13276 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13277#endif
13278#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13279 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13280#endif
13281#if 0 /* NT4SP1 - cmpxchg (AMD). */
13282 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13283#endif
13284#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13285 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13286#endif
13287#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13288 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13289
13290#endif
13291#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13292 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13293
13294#endif
13295#if 0 /* NT4SP1 - frstor [ecx] */
13296 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13297#endif
13298#if 0 /* xxxxxx - All long mode code. */
13299 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13300#endif
13301#if 0 /* rep movsq linux 3.7 64-bit boot. */
13302 || (pOrgCtx->rip == 0x0000000000100241)
13303#endif
13304#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13305 || (pOrgCtx->rip == 0x000000000215e240)
13306#endif
13307#if 0 /* DOS's size-overridden iret to v8086. */
13308 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13309#endif
13310 )
13311 )
13312 {
13313 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13314 RTLogFlags(NULL, "enabled");
13315 fNewNoRem = false;
13316 }
13317 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13318 {
13319 pVCpu->iem.s.fNoRem = fNewNoRem;
13320 if (!fNewNoRem)
13321 {
13322 LogAlways(("Enabling verification mode!\n"));
13323 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13324 }
13325 else
13326 LogAlways(("Disabling verification mode!\n"));
13327 }
13328
13329 /*
13330 * Switch state.
13331 */
13332 if (IEM_VERIFICATION_ENABLED(pVCpu))
13333 {
13334 static CPUMCTX s_DebugCtx; /* Ugly! */
13335
13336 s_DebugCtx = *pOrgCtx;
13337 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13338 }
13339
13340 /*
13341 * See if there is an interrupt pending in TRPM and inject it if we can.
13342 */
13343 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13344 if ( pOrgCtx->eflags.Bits.u1IF
13345 && TRPMHasTrap(pVCpu)
13346 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13347 {
13348 uint8_t u8TrapNo;
13349 TRPMEVENT enmType;
13350 RTGCUINT uErrCode;
13351 RTGCPTR uCr2;
13352 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13353 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13354 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13355 TRPMResetTrap(pVCpu);
13356 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13357 }
13358
13359 /*
13360 * Reset the counters.
13361 */
13362 pVCpu->iem.s.cIOReads = 0;
13363 pVCpu->iem.s.cIOWrites = 0;
13364 pVCpu->iem.s.fIgnoreRaxRdx = false;
13365 pVCpu->iem.s.fOverlappingMovs = false;
13366 pVCpu->iem.s.fProblematicMemory = false;
13367 pVCpu->iem.s.fUndefinedEFlags = 0;
13368
13369 if (IEM_VERIFICATION_ENABLED(pVCpu))
13370 {
13371 /*
13372 * Free all verification records.
13373 */
13374 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13375 pVCpu->iem.s.pIemEvtRecHead = NULL;
13376 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
13377 do
13378 {
13379 while (pEvtRec)
13380 {
13381 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13382 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13383 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13384 pEvtRec = pNext;
13385 }
13386 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13387 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13388 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13389 } while (pEvtRec);
13390 }
13391}
13392
13393
13394/**
13395 * Allocates an event record.
13396 * @returns Pointer to a record, NULL if verification is disabled or allocation fails.
13397 */
13398IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
13399{
13400 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13401 return NULL;
13402
13403 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
13404 if (pEvtRec)
13405 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
13406 else
13407 {
13408 if (!pVCpu->iem.s.ppIemEvtRecNext)
13409 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
13410
13411 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
13412 if (!pEvtRec)
13413 return NULL;
13414 }
13415 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
13416 pEvtRec->pNext = NULL;
13417 return pEvtRec;
13418}
13419
13420
13421/**
13422 * IOMMMIORead notification.
13423 */
13424VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
13425{
13426 PVMCPU pVCpu = VMMGetCpu(pVM);
13427 if (!pVCpu)
13428 return;
13429 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13430 if (!pEvtRec)
13431 return;
13432 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
13433 pEvtRec->u.RamRead.GCPhys = GCPhys;
13434 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
13435 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13436 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13437}
13438
13439
13440/**
13441 * IOMMMIOWrite notification.
13442 */
13443VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
13444{
13445 PVMCPU pVCpu = VMMGetCpu(pVM);
13446 if (!pVCpu)
13447 return;
13448 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13449 if (!pEvtRec)
13450 return;
13451 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
13452 pEvtRec->u.RamWrite.GCPhys = GCPhys;
13453 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
13454 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
13455 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
13456 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
13457 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
13458 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13459 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13460}
13461
13462
13463/**
13464 * IOMIOPortRead notification.
13465 */
13466VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
13467{
13468 PVMCPU pVCpu = VMMGetCpu(pVM);
13469 if (!pVCpu)
13470 return;
13471 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13472 if (!pEvtRec)
13473 return;
13474 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13475 pEvtRec->u.IOPortRead.Port = Port;
13476 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13477 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13478 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13479}
13480
13481/**
13482 * IOMIOPortWrite notification.
13483 */
13484VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13485{
13486 PVMCPU pVCpu = VMMGetCpu(pVM);
13487 if (!pVCpu)
13488 return;
13489 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13490 if (!pEvtRec)
13491 return;
13492 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13493 pEvtRec->u.IOPortWrite.Port = Port;
13494 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13495 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13496 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13497 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13498}
13499
13500
13501VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
13502{
13503 PVMCPU pVCpu = VMMGetCpu(pVM);
13504 if (!pVCpu)
13505 return;
13506 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13507 if (!pEvtRec)
13508 return;
13509 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
13510 pEvtRec->u.IOPortStrRead.Port = Port;
13511 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
13512 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
13513 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13514 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13515}
13516
13517
13518VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
13519{
13520 PVMCPU pVCpu = VMMGetCpu(pVM);
13521 if (!pVCpu)
13522 return;
13523 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13524 if (!pEvtRec)
13525 return;
13526 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
13527 pEvtRec->u.IOPortStrWrite.Port = Port;
13528 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
13529 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
13530 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13531 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13532}
13533
13534
13535/**
13536 * Fakes and records an I/O port read.
13537 *
13538 * @returns VINF_SUCCESS.
13539 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13540 * @param Port The I/O port.
13541 * @param pu32Value Where to store the fake value.
13542 * @param cbValue The size of the access.
13543 */
13544IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13545{
13546 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13547 if (pEvtRec)
13548 {
13549 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13550 pEvtRec->u.IOPortRead.Port = Port;
13551 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13552 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13553 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13554 }
13555 pVCpu->iem.s.cIOReads++;
13556 *pu32Value = 0xcccccccc;
13557 return VINF_SUCCESS;
13558}
13559
13560
13561/**
13562 * Fakes and records an I/O port write.
13563 *
13564 * @returns VINF_SUCCESS.
13565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13566 * @param Port The I/O port.
13567 * @param u32Value The value being written.
13568 * @param cbValue The size of the access.
13569 */
13570IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13571{
13572 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13573 if (pEvtRec)
13574 {
13575 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13576 pEvtRec->u.IOPortWrite.Port = Port;
13577 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13578 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13579 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13580 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13581 }
13582 pVCpu->iem.s.cIOWrites++;
13583 return VINF_SUCCESS;
13584}
13585
13586
13587/**
13588 * Used to add extra details about a stub case.
13589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13590 */
13591IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
13592{
13593 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13594 PVM pVM = pVCpu->CTX_SUFF(pVM);
13596 char szRegs[4096];
13597 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
13598 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
13599 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
13600 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
13601 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
13602 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
13603 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
13604 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
13605 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
13606 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
13607 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
13608 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
13609 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
13610 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
13611 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
13612 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
13613 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
13614 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
13615 " efer=%016VR{efer}\n"
13616 " pat=%016VR{pat}\n"
13617 " sf_mask=%016VR{sf_mask}\n"
13618 "krnl_gs_base=%016VR{krnl_gs_base}\n"
13619 " lstar=%016VR{lstar}\n"
13620 " star=%016VR{star} cstar=%016VR{cstar}\n"
13621 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
13622 );
13623
13624 char szInstr1[256];
13625 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
13626 DBGF_DISAS_FLAGS_DEFAULT_MODE,
13627 szInstr1, sizeof(szInstr1), NULL);
13628 char szInstr2[256];
13629 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
13630 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13631 szInstr2, sizeof(szInstr2), NULL);
13632
13633 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
13634}
13635
13636
13637/**
13638 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
13639 * dump to the assertion info.
13640 *
13641 * @param pEvtRec The record to dump.
13642 */
13643IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
13644{
13645 switch (pEvtRec->enmEvent)
13646 {
13647 case IEMVERIFYEVENT_IOPORT_READ:
13648 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
13649 pEvtRec->u.IOPortRead.Port,
13650 pEvtRec->u.IOPortRead.cbValue);
13651 break;
13652 case IEMVERIFYEVENT_IOPORT_WRITE:
13653 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
13654 pEvtRec->u.IOPortWrite.Port,
13655 pEvtRec->u.IOPortWrite.cbValue,
13656 pEvtRec->u.IOPortWrite.u32Value);
13657 break;
13658 case IEMVERIFYEVENT_IOPORT_STR_READ:
13659 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
13660 pEvtRec->u.IOPortStrRead.Port,
13661 pEvtRec->u.IOPortStrRead.cbValue,
13662 pEvtRec->u.IOPortStrRead.cTransfers);
13663 break;
13664 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13665 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
13666 pEvtRec->u.IOPortStrWrite.Port,
13667 pEvtRec->u.IOPortStrWrite.cbValue,
13668 pEvtRec->u.IOPortStrWrite.cTransfers);
13669 break;
13670 case IEMVERIFYEVENT_RAM_READ:
13671 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
13672 pEvtRec->u.RamRead.GCPhys,
13673 pEvtRec->u.RamRead.cb);
13674 break;
13675 case IEMVERIFYEVENT_RAM_WRITE:
13676 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
13677 pEvtRec->u.RamWrite.GCPhys,
13678 pEvtRec->u.RamWrite.cb,
13679 (int)pEvtRec->u.RamWrite.cb,
13680 pEvtRec->u.RamWrite.ab);
13681 break;
13682 default:
13683 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
13684 break;
13685 }
13686}
13687
13688
13689/**
13690 * Raises an assertion on the specified records, showing the given message with
13691 * a dump of both records attached.
13692 *
13693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13694 * @param pEvtRec1 The first record.
13695 * @param pEvtRec2 The second record.
13696 * @param pszMsg The message explaining why we're asserting.
13697 */
13698IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
13699{
13700 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13701 iemVerifyAssertAddRecordDump(pEvtRec1);
13702 iemVerifyAssertAddRecordDump(pEvtRec2);
13703 iemVerifyAssertMsg2(pVCpu);
13704 RTAssertPanic();
13705}
13706
13707
13708/**
13709 * Raises an assertion on the specified record, showing the given message with
13710 * a record dump attached.
13711 *
13712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13713 * @param pEvtRec The record to dump.
13714 * @param pszMsg The message explaining why we're asserting.
13715 */
13716IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
13717{
13718 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13719 iemVerifyAssertAddRecordDump(pEvtRec);
13720 iemVerifyAssertMsg2(pVCpu);
13721 RTAssertPanic();
13722}
13723
13724
13725/**
13726 * Verifies a write record.
13727 *
13728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13729 * @param pEvtRec The write record.
13730 * @param fRem Set if REM was doing the other execution. If clear,
13731 * it was HM.
13732 */
13733IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
13734{
13735 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
13736 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
13737 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
13738 if ( RT_FAILURE(rc)
13739 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
13740 {
13741 /* fend off ins */
13742 if ( !pVCpu->iem.s.cIOReads
13743 || pEvtRec->u.RamWrite.ab[0] != 0xcc
13744 || ( pEvtRec->u.RamWrite.cb != 1
13745 && pEvtRec->u.RamWrite.cb != 2
13746 && pEvtRec->u.RamWrite.cb != 4) )
13747 {
13748 /* fend off ROMs and MMIO */
13749 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
13750 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
13751 {
13752 /* fend off fxsave */
13753 if (pEvtRec->u.RamWrite.cb != 512)
13754 {
13755 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
13756 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13757 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
13758 RTAssertMsg2Add("%s: %.*Rhxs\n"
13759 "iem: %.*Rhxs\n",
13760 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
13761 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
13762 iemVerifyAssertAddRecordDump(pEvtRec);
13763 iemVerifyAssertMsg2(pVCpu);
13764 RTAssertPanic();
13765 }
13766 }
13767 }
13768 }
13769
13770}
13771
13772/**
13773 * Performs the post-execution verification checks.
13774 */
13775IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
13776{
13777 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13778 return rcStrictIem;
13779
13780 /*
13781 * Switch back the state.
13782 */
13783 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
13784 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
13785 Assert(pOrgCtx != pDebugCtx);
13786 IEM_GET_CTX(pVCpu) = pOrgCtx;
13787
13788 /*
13789 * Execute the instruction in REM.
13790 */
13791 bool fRem = false;
13792 PVM pVM = pVCpu->CTX_SUFF(pVM);
13794 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
13795#ifdef IEM_VERIFICATION_MODE_FULL_HM
13796 if ( HMIsEnabled(pVM)
13797 && pVCpu->iem.s.cIOReads == 0
13798 && pVCpu->iem.s.cIOWrites == 0
13799 && !pVCpu->iem.s.fProblematicMemory)
13800 {
13801 uint64_t uStartRip = pOrgCtx->rip;
13802 unsigned iLoops = 0;
13803 do
13804 {
13805 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
13806 iLoops++;
13807 } while ( rc == VINF_SUCCESS
13808 || ( rc == VINF_EM_DBG_STEPPED
13809 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13810 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
13811 || ( pOrgCtx->rip != pDebugCtx->rip
13812 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
13813 && iLoops < 8) );
13814 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
13815 rc = VINF_SUCCESS;
13816 }
13817#endif
13818 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
13819 || rc == VINF_IOM_R3_IOPORT_READ
13820 || rc == VINF_IOM_R3_IOPORT_WRITE
13821 || rc == VINF_IOM_R3_MMIO_READ
13822 || rc == VINF_IOM_R3_MMIO_READ_WRITE
13823 || rc == VINF_IOM_R3_MMIO_WRITE
13824 || rc == VINF_CPUM_R3_MSR_READ
13825 || rc == VINF_CPUM_R3_MSR_WRITE
13826 || rc == VINF_EM_RESCHEDULE
13827 )
13828 {
13829 EMRemLock(pVM);
13830 rc = REMR3EmulateInstruction(pVM, pVCpu);
13831 AssertRC(rc);
13832 EMRemUnlock(pVM);
13833 fRem = true;
13834 }
13835
13836# if 1 /* Skip unimplemented instructions for now. */
13837 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13838 {
13839 IEM_GET_CTX(pVCpu) = pOrgCtx;
13840 if (rc == VINF_EM_DBG_STEPPED)
13841 return VINF_SUCCESS;
13842 return rc;
13843 }
13844# endif
13845
13846 /*
13847 * Compare the register states.
13848 */
13849 unsigned cDiffs = 0;
13850 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13851 {
13852 //Log(("REM and IEM end up with different registers!\n"));
13853 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13854
13855# define CHECK_FIELD(a_Field) \
13856 do \
13857 { \
13858 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13859 { \
13860 switch (sizeof(pOrgCtx->a_Field)) \
13861 { \
13862 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13863 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13864 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13865 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13866 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13867 } \
13868 cDiffs++; \
13869 } \
13870 } while (0)
13871# define CHECK_XSTATE_FIELD(a_Field) \
13872 do \
13873 { \
13874 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13875 { \
13876 switch (sizeof(pOrgXState->a_Field)) \
13877 { \
13878 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13879 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13880 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13881 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13882 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13883 } \
13884 cDiffs++; \
13885 } \
13886 } while (0)
13887
13888# define CHECK_BIT_FIELD(a_Field) \
13889 do \
13890 { \
13891 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13892 { \
13893 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13894 cDiffs++; \
13895 } \
13896 } while (0)
13897
13898# define CHECK_SEL(a_Sel) \
13899 do \
13900 { \
13901 CHECK_FIELD(a_Sel.Sel); \
13902 CHECK_FIELD(a_Sel.Attr.u); \
13903 CHECK_FIELD(a_Sel.u64Base); \
13904 CHECK_FIELD(a_Sel.u32Limit); \
13905 CHECK_FIELD(a_Sel.fFlags); \
13906 } while (0)
13907
13908 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13909 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13910
13911#if 1 /* The recompiler doesn't update these the intel way. */
13912 if (fRem)
13913 {
13914 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13915 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13916 pOrgXState->x87.CS = pDebugXState->x87.CS;
13917 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13918 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13919 pOrgXState->x87.DS = pDebugXState->x87.DS;
13920 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13921 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13922 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13923 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13924 }
13925#endif
13926 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13927 {
13928 RTAssertMsg2Weak(" the FPU state differs\n");
13929 cDiffs++;
13930 CHECK_XSTATE_FIELD(x87.FCW);
13931 CHECK_XSTATE_FIELD(x87.FSW);
13932 CHECK_XSTATE_FIELD(x87.FTW);
13933 CHECK_XSTATE_FIELD(x87.FOP);
13934 CHECK_XSTATE_FIELD(x87.FPUIP);
13935 CHECK_XSTATE_FIELD(x87.CS);
13936 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13937 CHECK_XSTATE_FIELD(x87.FPUDP);
13938 CHECK_XSTATE_FIELD(x87.DS);
13939 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13940 CHECK_XSTATE_FIELD(x87.MXCSR);
13941 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13942 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13943 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13944 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13945 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13946 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13947 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13948 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13949 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13950 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13951 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13952 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13953 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13954 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13955 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13956 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13957 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13958 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13959 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13960 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13961 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13962 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13963 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13964 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13965 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13966 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13967 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13968 }
13969 CHECK_FIELD(rip);
13970 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13971 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13972 {
13973 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13974 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13975 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13976 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13977 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13978 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13979 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13980 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13981 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13982 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13983 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13984 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13985 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13986 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13987 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13988 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13989 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
13990 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13991 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13992 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13993 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13994 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13995 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13996 }
13997
13998 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13999 CHECK_FIELD(rax);
14000 CHECK_FIELD(rcx);
14001 if (!pVCpu->iem.s.fIgnoreRaxRdx)
14002 CHECK_FIELD(rdx);
14003 CHECK_FIELD(rbx);
14004 CHECK_FIELD(rsp);
14005 CHECK_FIELD(rbp);
14006 CHECK_FIELD(rsi);
14007 CHECK_FIELD(rdi);
14008 CHECK_FIELD(r8);
14009 CHECK_FIELD(r9);
14010 CHECK_FIELD(r10);
14011 CHECK_FIELD(r11);
14012 CHECK_FIELD(r12);
14013 CHECK_FIELD(r13);
14014 CHECK_SEL(cs);
14015 CHECK_SEL(ss);
14016 CHECK_SEL(ds);
14017 CHECK_SEL(es);
14018 CHECK_SEL(fs);
14019 CHECK_SEL(gs);
14020 CHECK_FIELD(cr0);
14021
14022 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
14023 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
14024 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
14025 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
14026 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14027 {
14028 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14029 { /* ignore */ }
14030 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14031 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14032 && fRem)
14033 { /* ignore */ }
14034 else
14035 CHECK_FIELD(cr2);
14036 }
14037 CHECK_FIELD(cr3);
14038 CHECK_FIELD(cr4);
14039 CHECK_FIELD(dr[0]);
14040 CHECK_FIELD(dr[1]);
14041 CHECK_FIELD(dr[2]);
14042 CHECK_FIELD(dr[3]);
14043 CHECK_FIELD(dr[6]);
14044 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14045 CHECK_FIELD(dr[7]);
14046 CHECK_FIELD(gdtr.cbGdt);
14047 CHECK_FIELD(gdtr.pGdt);
14048 CHECK_FIELD(idtr.cbIdt);
14049 CHECK_FIELD(idtr.pIdt);
14050 CHECK_SEL(ldtr);
14051 CHECK_SEL(tr);
14052 CHECK_FIELD(SysEnter.cs);
14053 CHECK_FIELD(SysEnter.eip);
14054 CHECK_FIELD(SysEnter.esp);
14055 CHECK_FIELD(msrEFER);
14056 CHECK_FIELD(msrSTAR);
14057 CHECK_FIELD(msrPAT);
14058 CHECK_FIELD(msrLSTAR);
14059 CHECK_FIELD(msrCSTAR);
14060 CHECK_FIELD(msrSFMASK);
14061 CHECK_FIELD(msrKERNELGSBASE);
14062
14063 if (cDiffs != 0)
14064 {
14065 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14066 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14067 RTAssertPanic();
14068 static bool volatile s_fEnterDebugger = true;
14069 if (s_fEnterDebugger)
14070 DBGFSTOP(pVM);
14071
14072# if 1 /* Ignore unimplemented instructions for now. */
14073 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14074 rcStrictIem = VINF_SUCCESS;
14075# endif
14076 }
14077# undef CHECK_FIELD
14078# undef CHECK_BIT_FIELD
14079 }
14080
14081 /*
14082 * If the register state compared fine, check the verification event
14083 * records.
14084 */
14085 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14086 {
14087 /*
14088 * Compare verification event records.
14089 * - I/O port accesses should be a 1:1 match.
14090 */
14091 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14092 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14093 while (pIemRec && pOtherRec)
14094 {
14095 /* Since we might miss RAM writes and reads, ignore reads and check
14096 that any written memory is the same, skipping over extra IEM records. */
14097 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14098 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14099 && pIemRec->pNext)
14100 {
14101 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14102 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14103 pIemRec = pIemRec->pNext;
14104 }
14105
14106 /* Do the compare. */
14107 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14108 {
14109 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14110 break;
14111 }
14112 bool fEquals;
14113 switch (pIemRec->enmEvent)
14114 {
14115 case IEMVERIFYEVENT_IOPORT_READ:
14116 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14117 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14118 break;
14119 case IEMVERIFYEVENT_IOPORT_WRITE:
14120 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14121 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14122 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14123 break;
14124 case IEMVERIFYEVENT_IOPORT_STR_READ:
14125 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14126 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14127 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14128 break;
14129 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14130 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14131 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14132 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14133 break;
14134 case IEMVERIFYEVENT_RAM_READ:
14135 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14136 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14137 break;
14138 case IEMVERIFYEVENT_RAM_WRITE:
14139 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14140 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14141 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14142 break;
14143 default:
14144 fEquals = false;
14145 break;
14146 }
14147 if (!fEquals)
14148 {
14149 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14150 break;
14151 }
14152
14153 /* advance */
14154 pIemRec = pIemRec->pNext;
14155 pOtherRec = pOtherRec->pNext;
14156 }
14157
14158 /* Ignore extra writes and reads. */
14159 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14160 {
14161 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14162 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14163 pIemRec = pIemRec->pNext;
14164 }
14165 if (pIemRec != NULL)
14166 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14167 else if (pOtherRec != NULL)
14168 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14169 }
14170 IEM_GET_CTX(pVCpu) = pOrgCtx;
14171
14172 return rcStrictIem;
14173}
14174
14175#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14176
14177/* stubs */
14178IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14179{
14180 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14181 return VERR_INTERNAL_ERROR;
14182}
14183
14184IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14185{
14186 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14187 return VERR_INTERNAL_ERROR;
14188}
14189
14190#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14191
14192
14193#ifdef LOG_ENABLED
14194/**
14195 * Logs the current instruction.
14196 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14197 * @param pCtx The current CPU context.
14198 * @param fSameCtx Set if we have the same context information as the VMM,
14199 * clear if we may have already executed an instruction in
14200 * our debug context. When clear, we assume IEMCPU holds
14201 * valid CPU mode info.
14202 */
14203IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14204{
14205# ifdef IN_RING3
14206 if (LogIs2Enabled())
14207 {
14208 char szInstr[256];
14209 uint32_t cbInstr = 0;
14210 if (fSameCtx)
14211 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14212 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14213 szInstr, sizeof(szInstr), &cbInstr);
14214 else
14215 {
14216 uint32_t fFlags = 0;
14217 switch (pVCpu->iem.s.enmCpuMode)
14218 {
14219 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14220 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14221 case IEMMODE_16BIT:
14222 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14223 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14224 else
14225 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14226 break;
14227 }
14228 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14229 szInstr, sizeof(szInstr), &cbInstr);
14230 }
14231
14232 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14233 Log2(("****\n"
14234 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14235 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14236 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14237 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14238 " %s\n"
14239 ,
14240 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14241 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14242 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14243 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14244 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14245 szInstr));
14246
14247 if (LogIs3Enabled())
14248 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14249 }
14250 else
14251# endif
14252 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14253 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14254 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14255}
14256#endif
14257
14258
14259/**
14260 * Makes status code adjustments (pass up from I/O and access handler)
14261 * as well as maintaining statistics.
14262 *
14263 * @returns Strict VBox status code to pass up.
14264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14265 * @param rcStrict The status from executing an instruction.
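 *
 * @note    As implemented below: a pending pass-up status (rcPassUp) replaces
 *          an informational rcStrict when it lies outside the
 *          VINF_EM_FIRST..VINF_EM_LAST range or is numerically smaller than
 *          rcStrict; otherwise rcStrict is kept and the statistics updated.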
14266 */
14267DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14268{
14269 if (rcStrict != VINF_SUCCESS)
14270 {
14271 if (RT_SUCCESS(rcStrict))
14272 {
14273 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14274 || rcStrict == VINF_IOM_R3_IOPORT_READ
14275 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14276 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14277 || rcStrict == VINF_IOM_R3_MMIO_READ
14278 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14279 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14280 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14281 || rcStrict == VINF_CPUM_R3_MSR_READ
14282 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14283 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14284 || rcStrict == VINF_EM_RAW_TO_R3
14285 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14286 /* raw-mode / virt handlers only: */
14287 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14288 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14289 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14290 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14291 || rcStrict == VINF_SELM_SYNC_GDT
14292 || rcStrict == VINF_CSAM_PENDING_ACTION
14293 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14294 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14295/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
14296 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
14297 if (rcPassUp == VINF_SUCCESS)
14298 pVCpu->iem.s.cRetInfStatuses++;
14299 else if ( rcPassUp < VINF_EM_FIRST
14300 || rcPassUp > VINF_EM_LAST
14301 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14302 {
14303 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14304 pVCpu->iem.s.cRetPassUpStatus++;
14305 rcStrict = rcPassUp;
14306 }
14307 else
14308 {
14309 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14310 pVCpu->iem.s.cRetInfStatuses++;
14311 }
14312 }
14313 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14314 pVCpu->iem.s.cRetAspectNotImplemented++;
14315 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14316 pVCpu->iem.s.cRetInstrNotImplemented++;
14317#ifdef IEM_VERIFICATION_MODE_FULL
14318 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14319 rcStrict = VINF_SUCCESS;
14320#endif
14321 else
14322 pVCpu->iem.s.cRetErrStatuses++;
14323 }
14324 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14325 {
14326 pVCpu->iem.s.cRetPassUpStatus++;
14327 rcStrict = pVCpu->iem.s.rcPassUp;
14328 }
14329
14330 return rcStrict;
14331}
14332
14333
14334/**
14335 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14336 * IEMExecOneWithPrefetchedByPC.
14337 *
14338 * Similar code is found in IEMExecLots.
14339 *
14340 * @return Strict VBox status code.
14341 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14343 * @param fExecuteInhibit If set, execute the instruction following CLI,
14344 * POP SS and MOV SS,GR.
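 *                          (The inhibit window is tracked via the
 *                          VMCPU_FF_INHIBIT_INTERRUPTS force-action flag and
 *                          EMGetInhibitInterruptsPC(); see the body below.)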
14345 */
14346DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14347{
14348#ifdef IEM_WITH_SETJMP
14349 VBOXSTRICTRC rcStrict;
14350 jmp_buf JmpBuf;
14351 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14352 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14353 if ((rcStrict = setjmp(JmpBuf)) == 0)
14354 {
14355 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14356 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14357 }
14358 else
14359 pVCpu->iem.s.cLongJumps++;
14360 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14361#else
14362 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14363 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14364#endif
14365 if (rcStrict == VINF_SUCCESS)
14366 pVCpu->iem.s.cInstructions++;
14367 if (pVCpu->iem.s.cActiveMappings > 0)
14368 {
14369 Assert(rcStrict != VINF_SUCCESS);
14370 iemMemRollback(pVCpu);
14371 }
14372//#ifdef DEBUG
14373// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14374//#endif
14375
14376 /* Execute the next instruction as well if a cli, pop ss or
14377 mov ss, Gr has just completed successfully. */
14378 if ( fExecuteInhibit
14379 && rcStrict == VINF_SUCCESS
14380 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14381 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
14382 {
14383 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14384 if (rcStrict == VINF_SUCCESS)
14385 {
14386#ifdef LOG_ENABLED
14387 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14388#endif
14389#ifdef IEM_WITH_SETJMP
14390 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14391 if ((rcStrict = setjmp(JmpBuf)) == 0)
14392 {
14393 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14394 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14395 }
14396 else
14397 pVCpu->iem.s.cLongJumps++;
14398 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14399#else
14400 IEM_OPCODE_GET_NEXT_U8(&b);
14401 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14402#endif
14403 if (rcStrict == VINF_SUCCESS)
14404 pVCpu->iem.s.cInstructions++;
14405 if (pVCpu->iem.s.cActiveMappings > 0)
14406 {
14407 Assert(rcStrict != VINF_SUCCESS);
14408 iemMemRollback(pVCpu);
14409 }
14410 }
14411 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14412 }
14413
14414 /*
14415 * Return value fiddling, statistics and sanity assertions.
14416 */
14417 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14418
14419 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14420 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14421#if defined(IEM_VERIFICATION_MODE_FULL)
14422 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14423 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14424 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14425 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14426#endif
14427 return rcStrict;
14428}
14429
14430
14431#ifdef IN_RC
14432/**
14433 * Re-enters raw-mode or ensure we return to ring-3.
14434 *
14435 * @returns rcStrict, maybe modified.
14436 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14437 * @param pCtx The current CPU context.
14438 * @param rcStrict The status code returned by the interpreter.
14439 */
14440DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
14441{
14442 if ( !pVCpu->iem.s.fInPatchCode
14443 && ( rcStrict == VINF_SUCCESS
14444 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14445 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14446 {
14447 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14448 CPUMRawEnter(pVCpu);
14449 else
14450 {
14451 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14452 rcStrict = VINF_EM_RESCHEDULE;
14453 }
14454 }
14455 return rcStrict;
14456}
14457#endif
14458
14459
14460/**
14461 * Execute one instruction.
14462 *
14463 * @return Strict VBox status code.
14464 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
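 *
 * @remarks A minimal usage sketch from EMT context; the surrounding error
 *          handling is illustrative only and not taken from a real caller:
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict != VINF_SUCCESS)
 *          Log(("IEMExecOne failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 * @endcode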
14465 */
14466VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14467{
14468#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14469 if (++pVCpu->iem.s.cVerifyDepth == 1)
14470 iemExecVerificationModeSetup(pVCpu);
14471#endif
14472#ifdef LOG_ENABLED
14473 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14474 iemLogCurInstr(pVCpu, pCtx, true);
14475#endif
14476
14477 /*
14478 * Do the decoding and emulation.
14479 */
14480 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14481 if (rcStrict == VINF_SUCCESS)
14482 rcStrict = iemExecOneInner(pVCpu, true);
14483
14484#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14485 /*
14486 * Assert some sanity.
14487 */
14488 if (pVCpu->iem.s.cVerifyDepth == 1)
14489 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14490 pVCpu->iem.s.cVerifyDepth--;
14491#endif
14492#ifdef IN_RC
14493 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14494#endif
14495 if (rcStrict != VINF_SUCCESS)
14496 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14497 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14498 return rcStrict;
14499}
14500
14501
14502VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14503{
14504 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14505 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14506
14507 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14508 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14509 if (rcStrict == VINF_SUCCESS)
14510 {
14511 rcStrict = iemExecOneInner(pVCpu, true);
14512 if (pcbWritten)
14513 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14514 }
14515
14516#ifdef IN_RC
14517 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14518#endif
14519 return rcStrict;
14520}
14521
14522
14523VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14524 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14525{
14526 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14527 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14528
14529 VBOXSTRICTRC rcStrict;
14530 if ( cbOpcodeBytes
14531 && pCtx->rip == OpcodeBytesPC)
14532 {
14533 iemInitDecoder(pVCpu, false);
14534#ifdef IEM_WITH_CODE_TLB
14535 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14536 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14537 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14538 pVCpu->iem.s.offCurInstrStart = 0;
14539 pVCpu->iem.s.offInstrNextByte = 0;
14540#else
14541 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14542 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14543#endif
14544 rcStrict = VINF_SUCCESS;
14545 }
14546 else
14547 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14548 if (rcStrict == VINF_SUCCESS)
14549 {
14550 rcStrict = iemExecOneInner(pVCpu, true);
14551 }
14552
14553#ifdef IN_RC
14554 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14555#endif
14556 return rcStrict;
14557}
14558
14559
14560VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14561{
14562 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14563 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14564
14565 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14566 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14567 if (rcStrict == VINF_SUCCESS)
14568 {
14569 rcStrict = iemExecOneInner(pVCpu, false);
14570 if (pcbWritten)
14571 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14572 }
14573
14574#ifdef IN_RC
14575 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14576#endif
14577 return rcStrict;
14578}
14579
14580
14581VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14582 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14583{
14584 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14585 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14586
14587 VBOXSTRICTRC rcStrict;
14588 if ( cbOpcodeBytes
14589 && pCtx->rip == OpcodeBytesPC)
14590 {
14591 iemInitDecoder(pVCpu, true);
14592#ifdef IEM_WITH_CODE_TLB
14593 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14594 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14595 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14596 pVCpu->iem.s.offCurInstrStart = 0;
14597 pVCpu->iem.s.offInstrNextByte = 0;
14598#else
14599 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14600 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14601#endif
14602 rcStrict = VINF_SUCCESS;
14603 }
14604 else
14605 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14606 if (rcStrict == VINF_SUCCESS)
14607 rcStrict = iemExecOneInner(pVCpu, false);
14608
14609#ifdef IN_RC
14610 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14611#endif
14612 return rcStrict;
14613}
14614
14615
14616/**
14617 * For debugging DISGetParamSize, may come in handy.
14618 *
14619 * @returns Strict VBox status code.
14620 * @param pVCpu The cross context virtual CPU structure of the
14621 * calling EMT.
14622 * @param pCtxCore The context core structure.
14623 * @param OpcodeBytesPC The PC of the opcode bytes.
14624 * @param pvOpcodeBytes Prefetched opcode bytes.
14625 * @param cbOpcodeBytes Number of prefetched bytes.
14626 * @param pcbWritten Where to return the number of bytes written.
14627 * Optional.
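 *
 * @remarks Hedged usage sketch, assuming pCtx = IEM_GET_CTX(pVCpu) and a
 *          caller-supplied cbWritten variable; the opcode bytes (a "jmp $")
 *          are illustrative only:
 * @code
 *      static uint8_t const s_abJmpSelf[2] = { 0xeb, 0xfe }; // jmp $
 *      uint32_t cbWritten = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecOneBypassWithPrefetchedByPCWritten(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip,
 *                                                                        s_abJmpSelf, sizeof(s_abJmpSelf), &cbWritten);
 * @endcode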
14628 */
14629VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14630 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14631 uint32_t *pcbWritten)
14632{
14633 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14634 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14635
14636 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14637 VBOXSTRICTRC rcStrict;
14638 if ( cbOpcodeBytes
14639 && pCtx->rip == OpcodeBytesPC)
14640 {
14641 iemInitDecoder(pVCpu, true);
14642#ifdef IEM_WITH_CODE_TLB
14643 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14644 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14645 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14646 pVCpu->iem.s.offCurInstrStart = 0;
14647 pVCpu->iem.s.offInstrNextByte = 0;
14648#else
14649 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14650 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14651#endif
14652 rcStrict = VINF_SUCCESS;
14653 }
14654 else
14655 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14656 if (rcStrict == VINF_SUCCESS)
14657 {
14658 rcStrict = iemExecOneInner(pVCpu, false);
14659 if (pcbWritten)
14660 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14661 }
14662
14663#ifdef IN_RC
14664 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14665#endif
14666 return rcStrict;
14667}
14668
14669
14670VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14671{
14672 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14673
14674#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14675 /*
14676 * See if there is an interrupt pending in TRPM, inject it if we can.
14677 */
14678 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14679# ifdef IEM_VERIFICATION_MODE_FULL
14680 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14681# endif
14682 if ( pCtx->eflags.Bits.u1IF
14683 && TRPMHasTrap(pVCpu)
14684 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14685 {
14686 uint8_t u8TrapNo;
14687 TRPMEVENT enmType;
14688 RTGCUINT uErrCode;
14689 RTGCPTR uCr2;
14690 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14691 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14692 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14693 TRPMResetTrap(pVCpu);
14694 }
14695
14696 /*
14697 * Log the state.
14698 */
14699# ifdef LOG_ENABLED
14700 iemLogCurInstr(pVCpu, pCtx, true);
14701# endif
14702
14703 /*
14704 * Do the decoding and emulation.
14705 */
14706 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14707 if (rcStrict == VINF_SUCCESS)
14708 rcStrict = iemExecOneInner(pVCpu, true);
14709
14710 /*
14711 * Assert some sanity.
14712 */
14713 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14714
14715 /*
14716 * Log and return.
14717 */
14718 if (rcStrict != VINF_SUCCESS)
14719 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14720 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14721 if (pcInstructions)
14722 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14723 return rcStrict;
14724
14725#else /* Not verification mode */
14726
14727 /*
14728 * See if there is an interrupt pending in TRPM, inject it if we can.
14729 */
14730 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14731# ifdef IEM_VERIFICATION_MODE_FULL
14732 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14733# endif
14734 if ( pCtx->eflags.Bits.u1IF
14735 && TRPMHasTrap(pVCpu)
14736 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14737 {
14738 uint8_t u8TrapNo;
14739 TRPMEVENT enmType;
14740 RTGCUINT uErrCode;
14741 RTGCPTR uCr2;
14742 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14743 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14744 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14745 TRPMResetTrap(pVCpu);
14746 }
14747
14748 /*
14749 * Initial decoder init w/ prefetch, then setup setjmp.
14750 */
14751 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14752 if (rcStrict == VINF_SUCCESS)
14753 {
14754# ifdef IEM_WITH_SETJMP
14755 jmp_buf JmpBuf;
14756 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14757 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14758 pVCpu->iem.s.cActiveMappings = 0;
14759 if ((rcStrict = setjmp(JmpBuf)) == 0)
14760# endif
14761 {
14762 /*
14763 * The run loop. We limit ourselves to 4096 instructions right now.
14764 */
14765 PVM pVM = pVCpu->CTX_SUFF(pVM);
14766 uint32_t cInstr = 4096;
14767 for (;;)
14768 {
14769 /*
14770 * Log the state.
14771 */
14772# ifdef LOG_ENABLED
14773 iemLogCurInstr(pVCpu, pCtx, true);
14774# endif
14775
14776 /*
14777 * Do the decoding and emulation.
14778 */
14779 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14780 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14781 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14782 {
14783 Assert(pVCpu->iem.s.cActiveMappings == 0);
14784 pVCpu->iem.s.cInstructions++;
14785 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14786 {
14787 uint32_t fCpu = pVCpu->fLocalForcedActions
14788 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14789 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14790 | VMCPU_FF_TLB_FLUSH
14791# ifdef VBOX_WITH_RAW_MODE
14792 | VMCPU_FF_TRPM_SYNC_IDT
14793 | VMCPU_FF_SELM_SYNC_TSS
14794 | VMCPU_FF_SELM_SYNC_GDT
14795 | VMCPU_FF_SELM_SYNC_LDT
14796# endif
14797 | VMCPU_FF_INHIBIT_INTERRUPTS
14798 | VMCPU_FF_BLOCK_NMIS
14799 | VMCPU_FF_UNHALT ));
14800
14801 if (RT_LIKELY( ( !fCpu
14802 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14803 && !pCtx->rflags.Bits.u1IF) )
14804 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14805 {
14806 if (cInstr-- > 0)
14807 {
14808 Assert(pVCpu->iem.s.cActiveMappings == 0);
14809 iemReInitDecoder(pVCpu);
14810 continue;
14811 }
14812 }
14813 }
14814 Assert(pVCpu->iem.s.cActiveMappings == 0);
14815 }
14816 else if (pVCpu->iem.s.cActiveMappings > 0)
14817 iemMemRollback(pVCpu);
14818 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14819 break;
14820 }
14821 }
14822# ifdef IEM_WITH_SETJMP
14823 else
14824 {
14825 if (pVCpu->iem.s.cActiveMappings > 0)
14826 iemMemRollback(pVCpu);
14827 pVCpu->iem.s.cLongJumps++;
14828 }
14829 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14830# endif
14831
14832 /*
14833 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14834 */
14835 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14836 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14837# if defined(IEM_VERIFICATION_MODE_FULL)
14838 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14839 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14840 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14841 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14842# endif
14843 }
14844
14845 /*
14846 * Maybe re-enter raw-mode and log.
14847 */
14848# ifdef IN_RC
14849 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14850# endif
14851 if (rcStrict != VINF_SUCCESS)
14852 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14853 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14854 if (pcInstructions)
14855 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14856 return rcStrict;
14857#endif /* Not verification mode */
14858}
14859
14860
14861
14862/**
14863 * Injects a trap, fault, abort, software interrupt or external interrupt.
14864 *
14865 * The parameter list matches TRPMQueryTrapAll pretty closely.
14866 *
14867 * @returns Strict VBox status code.
14868 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14869 * @param u8TrapNo The trap number.
14870 * @param enmType What type is it (trap/fault/abort), software
14871 * interrupt or hardware interrupt.
14872 * @param uErrCode The error code if applicable.
14873 * @param uCr2 The CR2 value if applicable.
14874 * @param cbInstr The instruction length (only relevant for
14875 * software interrupts).
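 *
 * @remarks Sketch of the typical TRPM pairing, mirroring IEMExecLots and
 *          IEMInjectTrpmEvent in this file:
 * @code
 *      uint8_t      u8TrapNo;
 *      TRPMEVENT    enmType;
 *      RTGCUINT     uErrCode;
 *      RTGCPTR      uCr2;
 *      int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */);
 *      AssertRC(rc2);
 *      VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
 *      if (rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT)
 *          TRPMResetTrap(pVCpu);
 * @endcode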
14876 */
14877VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14878 uint8_t cbInstr)
14879{
14880 iemInitDecoder(pVCpu, false);
14881#ifdef DBGFTRACE_ENABLED
14882 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14883 u8TrapNo, enmType, uErrCode, uCr2);
14884#endif
14885
14886 uint32_t fFlags;
14887 switch (enmType)
14888 {
14889 case TRPM_HARDWARE_INT:
14890 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14891 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14892 uErrCode = uCr2 = 0;
14893 break;
14894
14895 case TRPM_SOFTWARE_INT:
14896 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14897 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14898 uErrCode = uCr2 = 0;
14899 break;
14900
14901 case TRPM_TRAP:
14902 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14903 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14904 if (u8TrapNo == X86_XCPT_PF)
14905 fFlags |= IEM_XCPT_FLAGS_CR2;
14906 switch (u8TrapNo)
14907 {
14908 case X86_XCPT_DF:
14909 case X86_XCPT_TS:
14910 case X86_XCPT_NP:
14911 case X86_XCPT_SS:
14912 case X86_XCPT_PF:
14913 case X86_XCPT_AC:
14914 fFlags |= IEM_XCPT_FLAGS_ERR;
14915 break;
14916
14917 case X86_XCPT_NMI:
14918 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14919 break;
14920 }
14921 break;
14922
14923 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14924 }
14925
14926 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14927}
14928
14929
14930/**
14931 * Injects the active TRPM event.
14932 *
14933 * @returns Strict VBox status code.
14934 * @param pVCpu The cross context virtual CPU structure.
14935 */
14936VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14937{
14938#ifndef IEM_IMPLEMENTS_TASKSWITCH
14939 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14940#else
14941 uint8_t u8TrapNo;
14942 TRPMEVENT enmType;
14943 RTGCUINT uErrCode;
14944 RTGCUINTPTR uCr2;
14945 uint8_t cbInstr;
14946 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14947 if (RT_FAILURE(rc))
14948 return rc;
14949
14950 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14951
14952 /** @todo Are there any other codes that imply the event was successfully
14953 * delivered to the guest? See @bugref{6607}. */
14954 if ( rcStrict == VINF_SUCCESS
14955 || rcStrict == VINF_IEM_RAISED_XCPT)
14956 {
14957 TRPMResetTrap(pVCpu);
14958 }
14959 return rcStrict;
14960#endif
14961}
14962
14963
14964VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14965{
14966 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14967 return VERR_NOT_IMPLEMENTED;
14968}
14969
14970
14971VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14972{
14973 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14974 return VERR_NOT_IMPLEMENTED;
14975}
14976
14977
14978#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14979/**
14980 * Executes a IRET instruction with default operand size.
14981 *
14982 * This is for PATM.
14983 *
14984 * @returns VBox status code.
14985 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14986 * @param pCtxCore The register frame.
14987 */
14988VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14989{
14990 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14991
14992 iemCtxCoreToCtx(pCtx, pCtxCore);
14993 iemInitDecoder(pVCpu);
14994 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14995 if (rcStrict == VINF_SUCCESS)
14996 iemCtxToCtxCore(pCtxCore, pCtx);
14997 else
14998 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14999 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15000 return rcStrict;
15001}
15002#endif
15003
15004
15005/**
15006 * Macro used by the IEMExec* methods to check the given instruction length.
15007 *
15008 * Will return on failure!
15009 *
15010 * @param a_cbInstr The given instruction length.
15011 * @param a_cbMin The minimum length.
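 *
 * @note    The single unsigned comparison below covers both bounds at once:
 *          (a_cbInstr - a_cbMin) wraps to a huge value when a_cbInstr < a_cbMin,
 *          so the assertion only passes for a_cbMin <= a_cbInstr <= 15, the
 *          maximum x86 instruction length.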
15012 */
15013#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15014 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15015 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
15016
15017
15018/**
15019 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15020 *
15021 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15022 *
15023 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
15024 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15025 * @param rcStrict The status code to fiddle.
15026 */
15027DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15028{
15029 iemUninitExec(pVCpu);
15030#ifdef IN_RC
15031 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15032 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15033#else
15034 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15035#endif
15036}
15037
15038
15039/**
15040 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15041 *
15042 * This API ASSUMES that the caller has already verified that the guest code is
15043 * allowed to access the I/O port. (The I/O port is in the DX register in the
15044 * guest state.)
15045 *
15046 * @returns Strict VBox status code.
15047 * @param pVCpu The cross context virtual CPU structure.
15048 * @param cbValue The size of the I/O port access (1, 2, or 4).
15049 * @param enmAddrMode The addressing mode.
15050 * @param fRepPrefix Indicates whether a repeat prefix is used
15051 * (doesn't matter which for this instruction).
15052 * @param cbInstr The instruction length in bytes.
15053 * @param iEffSeg The effective segment address.
15054 * @param fIoChecked Whether the access to the I/O port has been
15055 * checked or not. It's typically checked in the
15056 * HM scenario.
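 *
 * @remarks Hedged sketch of how an HM exit handler might forward a REP OUTSB;
 *          the concrete parameter values are illustrative assumptions derived
 *          from a decoded exit, not taken from this file:
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT,
 *                                                   true /*fRepPrefix*/, cbInstr,
 *                                                   X86_SREG_DS, true /*fIoChecked*/);
 * @endcode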
15057 */
15058VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15059 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15060{
15061 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15062 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15063
15064 /*
15065 * State init.
15066 */
15067 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15068
15069 /*
15070 * Switch orgy for getting to the right handler.
15071 */
15072 VBOXSTRICTRC rcStrict;
15073 if (fRepPrefix)
15074 {
15075 switch (enmAddrMode)
15076 {
15077 case IEMMODE_16BIT:
15078 switch (cbValue)
15079 {
15080 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15081 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15082 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15083 default:
15084 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15085 }
15086 break;
15087
15088 case IEMMODE_32BIT:
15089 switch (cbValue)
15090 {
15091 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15092 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15093 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15094 default:
15095 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15096 }
15097 break;
15098
15099 case IEMMODE_64BIT:
15100 switch (cbValue)
15101 {
15102 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15103 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15104 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15105 default:
15106 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15107 }
15108 break;
15109
15110 default:
15111 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15112 }
15113 }
15114 else
15115 {
15116 switch (enmAddrMode)
15117 {
15118 case IEMMODE_16BIT:
15119 switch (cbValue)
15120 {
15121 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15122 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15123 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15124 default:
15125 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15126 }
15127 break;
15128
15129 case IEMMODE_32BIT:
15130 switch (cbValue)
15131 {
15132 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15133 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15134 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15135 default:
15136 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15137 }
15138 break;
15139
15140 case IEMMODE_64BIT:
15141 switch (cbValue)
15142 {
15143 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15144 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15145 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15146 default:
15147 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15148 }
15149 break;
15150
15151 default:
15152 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15153 }
15154 }
15155
15156 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15157}
15158
15159
15160/**
15161 * Interface for HM and EM for executing string I/O IN (read) instructions.
15162 *
15163 * This API ASSUMES that the caller has already verified that the guest code is
15164 * allowed to access the I/O port. (The I/O port is in the DX register in the
15165 * guest state.)
15166 *
15167 * @returns Strict VBox status code.
15168 * @param pVCpu The cross context virtual CPU structure.
15169 * @param cbValue The size of the I/O port access (1, 2, or 4).
15170 * @param enmAddrMode The addressing mode.
15171 * @param fRepPrefix Indicates whether a repeat prefix is used
15172 * (doesn't matter which for this instruction).
15173 * @param cbInstr The instruction length in bytes.
15174 * @param fIoChecked Whether the access to the I/O port has been
15175 * checked or not. It's typically checked in the
15176 * HM scenario.
15177 */
15178VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15179 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15180{
15181 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15182
15183 /*
15184 * State init.
15185 */
15186 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15187
15188 /*
15189 * Switch orgy for getting to the right handler.
15190 */
15191 VBOXSTRICTRC rcStrict;
15192 if (fRepPrefix)
15193 {
15194 switch (enmAddrMode)
15195 {
15196 case IEMMODE_16BIT:
15197 switch (cbValue)
15198 {
15199 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15200 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15201 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15202 default:
15203 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15204 }
15205 break;
15206
15207 case IEMMODE_32BIT:
15208 switch (cbValue)
15209 {
15210 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15211 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15212 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15213 default:
15214 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15215 }
15216 break;
15217
15218 case IEMMODE_64BIT:
15219 switch (cbValue)
15220 {
15221 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15222 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15223 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15224 default:
15225 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15226 }
15227 break;
15228
15229 default:
15230 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15231 }
15232 }
15233 else
15234 {
15235 switch (enmAddrMode)
15236 {
15237 case IEMMODE_16BIT:
15238 switch (cbValue)
15239 {
15240 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15241 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15242 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15243 default:
15244 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15245 }
15246 break;
15247
15248 case IEMMODE_32BIT:
15249 switch (cbValue)
15250 {
15251 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15252 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15253 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15254 default:
15255 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15256 }
15257 break;
15258
15259 case IEMMODE_64BIT:
15260 switch (cbValue)
15261 {
15262 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15263 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15264 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15265 default:
15266 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15267 }
15268 break;
15269
15270 default:
15271 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15272 }
15273 }
15274
15275 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15276}
15277
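/*
 * Example (illustrative sketch only, not part of the build): one way an HM
 * string I/O exit handler might dispatch to IEMExecStringIoWrite and
 * IEMExecStringIoRead.  The helper name and the way the exit information
 * reaches it are hypothetical; the interface calls and their parameters are
 * the ones documented above.
 *
 *     static VBOXSTRICTRC hmHypotheticalStringIoExit(PVMCPU pVCpu, bool fWrite, uint8_t cbValue,
 *                                                    IEMMODE enmAddrMode, bool fRepPrefix,
 *                                                    uint8_t cbInstr, uint8_t iEffSeg)
 *     {
 *         // The port comes from guest DX; the intercept is assumed to have
 *         // already checked the I/O permission bitmap, hence fIoChecked=true.
 *         if (fWrite)
 *             return IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRepPrefix, cbInstr,
 *                                         iEffSeg, true);
 *         return IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRepPrefix, cbInstr, true);
 *     }
 */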
15278
15279/**
15280 * Interface for rawmode to execute an OUT instruction.
15281 *
15282 * @returns Strict VBox status code.
15283 * @param pVCpu The cross context virtual CPU structure.
15284 * @param cbInstr The instruction length in bytes.
15285 * @param u16Port The port to write to.
15286 * @param cbReg The register size.
15287 *
15288 * @remarks In ring-0 not all of the state needs to be synced in.
15289 */
15290VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15291{
15292 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15293 Assert(cbReg <= 4 && cbReg != 3);
15294
15295 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15296 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15297 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15298}
15299
15300
15301/**
15302 * Interface for rawmode to execute an IN instruction.
15303 *
15304 * @returns Strict VBox status code.
15305 * @param pVCpu The cross context virtual CPU structure.
15306 * @param cbInstr The instruction length in bytes.
15307 * @param u16Port The port to read from.
15308 * @param cbReg The register size.
15309 */
15310VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15311{
15312 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15313 Assert(cbReg <= 4 && cbReg != 3);
15314
15315 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15316 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15317 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15318}
15319
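/*
 * Example (illustrative sketch only, not part of the build): dispatching a
 * decoded port access to IEMExecDecodedOut / IEMExecDecodedIn.  The wrapper
 * and its fWrite parameter are hypothetical; the calls and the cbReg
 * constraint (1, 2 or 4 bytes) come from the interfaces above.
 *
 *     static VBOXSTRICTRC iemHypotheticalPortAccess(PVMCPU pVCpu, uint8_t cbInstr,
 *                                                   uint16_t u16Port, uint8_t cbReg, bool fWrite)
 *     {
 *         Assert(cbReg == 1 || cbReg == 2 || cbReg == 4);
 *         return fWrite
 *              ? IEMExecDecodedOut(pVCpu, cbInstr, u16Port, cbReg)
 *              : IEMExecDecodedIn(pVCpu, cbInstr, u16Port, cbReg);
 *     }
 */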
15320
15321/**
15322 * Interface for HM and EM to write to a CRx register.
15323 *
15324 * @returns Strict VBox status code.
15325 * @param pVCpu The cross context virtual CPU structure.
15326 * @param cbInstr The instruction length in bytes.
15327 * @param iCrReg The control register number (destination).
15328 * @param iGReg The general purpose register number (source).
15329 *
15330 * @remarks In ring-0 not all of the state needs to be synced in.
15331 */
15332VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15333{
15334 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15335 Assert(iCrReg < 16);
15336 Assert(iGReg < 16);
15337
15338 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15339 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15340 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15341}
15342
15343
15344/**
15345 * Interface for HM and EM to read from a CRx register.
15346 *
15347 * @returns Strict VBox status code.
15348 * @param pVCpu The cross context virtual CPU structure.
15349 * @param cbInstr The instruction length in bytes.
15350 * @param iGReg The general purpose register number (destination).
15351 * @param iCrReg The control register number (source).
15352 *
15353 * @remarks In ring-0 not all of the state needs to be synced in.
15354 */
15355VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15356{
15357 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15358 Assert(iCrReg < 16);
15359 Assert(iGReg < 16);
15360
15361 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15362 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15363 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15364}
15365
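/*
 * Example (illustrative sketch only, not part of the build): a MOV CRx
 * intercept handler knows the direction from its decoded exit information and
 * picks one of the two interfaces above.  Note the swapped iCrReg/iGReg
 * parameter order between the write and read variants.  The helper and the
 * fCrWrite flag are hypothetical.
 *
 *     static VBOXSTRICTRC hmHypotheticalMovCRxExit(PVMCPU pVCpu, uint8_t cbInstr,
 *                                                  uint8_t iCrReg, uint8_t iGReg, bool fCrWrite)
 *     {
 *         return fCrWrite
 *              ? IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg)  // mov CRx, GReg
 *              : IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);  // mov GReg, CRx
 *     }
 */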
15366
15367/**
15368 * Interface for HM and EM to clear the CR0[TS] bit.
15369 *
15370 * @returns Strict VBox status code.
15371 * @param pVCpu The cross context virtual CPU structure.
15372 * @param cbInstr The instruction length in bytes.
15373 *
15374 * @remarks In ring-0 not all of the state needs to be synced in.
15375 */
15376VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15377{
15378 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15379
15380 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15381 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15382 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15383}
15384
15385
15386/**
15387 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15388 *
15389 * @returns Strict VBox status code.
15390 * @param pVCpu The cross context virtual CPU structure.
15391 * @param cbInstr The instruction length in bytes.
15392 * @param uValue The machine status word to load into CR0 (LMSW only updates the low four bits).
15393 *
15394 * @remarks In ring-0 not all of the state needs to be synced in.
15395 */
15396VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
15397{
15398 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15399
15400 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15401 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
15402 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15403}
15404
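/*
 * Example (illustrative sketch only, not part of the build): CLTS and LMSW
 * both modify CR0 and are typically handled by one CR0-intercept path, using
 * the two interfaces above.  The handler shape and the fIsLmsw flag are
 * hypothetical.
 *
 *     static VBOXSTRICTRC hmHypotheticalCr0Insn(PVMCPU pVCpu, uint8_t cbInstr, bool fIsLmsw, uint16_t uMsw)
 *     {
 *         if (!fIsLmsw)
 *             return IEMExecDecodedClts(pVCpu, cbInstr);      // clears CR0.TS
 *         return IEMExecDecodedLmsw(pVCpu, cbInstr, uMsw);    // loads the MSW bits of CR0
 *     }
 */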
15405
15406/**
15407 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15408 *
15409 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15410 *
15411 * @returns Strict VBox status code.
15412 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15413 * @param cbInstr The instruction length in bytes.
15414 * @remarks In ring-0 not all of the state needs to be synced in.
15415 * @thread EMT(pVCpu)
15416 */
15417VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15418{
15419 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15420
15421 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15422 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15423 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15424}
15425
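/*
 * Example (illustrative sketch only, not part of the build): XSETBV takes its
 * inputs from guest ECX and EDX:EAX, so the only caller-side obligation is
 * that those registers are up to date in the CPU context before the call.
 * How they get synced is VT-x/AMD-V specific and not shown.
 *
 *     static VBOXSTRICTRC hmHypotheticalXsetbvExit(PVMCPU pVCpu, uint8_t cbInstr)
 *     {
 *         // Assumed precondition: guest ECX, EDX and EAX are valid in the context.
 *         return IEMExecDecodedXsetbv(pVCpu, cbInstr);
 *     }
 */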
15426
15427/**
15428 * Checks if IEM is in the process of delivering an event (interrupt or
15429 * exception).
15430 *
15431 * @returns true if we're in the process of raising an interrupt or exception,
15432 * false otherwise.
15433 * @param pVCpu The cross context virtual CPU structure.
15434 * @param puVector Where to store the vector associated with the
15435 * currently delivered event, optional.
15436 * @param pfFlags Where to store the event delivery flags (see
15437 * IEM_XCPT_FLAGS_XXX), optional.
15438 * @param puErr Where to store the error code associated with the
15439 * event, optional.
15440 * @param puCr2 Where to store the CR2 associated with the event,
15441 * optional.
15442 * @remarks The caller should check the flags to determine if the error code and
15443 * CR2 are valid for the event.
15444 */
15445VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15446{
15447 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15448 if (fRaisingXcpt)
15449 {
15450 if (puVector)
15451 *puVector = pVCpu->iem.s.uCurXcpt;
15452 if (pfFlags)
15453 *pfFlags = pVCpu->iem.s.fCurXcpt;
15454 if (puErr)
15455 *puErr = pVCpu->iem.s.uCurXcptErr;
15456 if (puCr2)
15457 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15458 }
15459 return fRaisingXcpt;
15460}
15461
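/*
 * Example (illustrative sketch only, not part of the build): querying the
 * event currently being delivered, e.g. when building up pending-event state
 * for a nested-guest exit.  The IEM_XCPT_FLAGS_ERR / IEM_XCPT_FLAGS_CR2 checks
 * follow the remark above; see the IEM_XCPT_FLAGS_XXX defines for the
 * authoritative names.
 *
 *     uint8_t  uVector;
 *     uint32_t fFlags, uErrCode;
 *     uint64_t uCr2;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErrCode, &uCr2))
 *     {
 *         Log(("Delivering vector %#x fFlags=%#x\n", uVector, fFlags));
 *         if (fFlags & IEM_XCPT_FLAGS_ERR)    // error code is only valid when flagged
 *             Log(("  uErr=%#x\n", uErrCode));
 *         if (fFlags & IEM_XCPT_FLAGS_CR2)    // CR2 is only valid for page faults
 *             Log(("  uCr2=%RX64\n", uCr2));
 *     }
 */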
15462
15463#ifdef VBOX_WITH_NESTED_HWVIRT
15464/**
15465 * Interface for HM and EM to emulate the CLGI instruction.
15466 *
15467 * @returns Strict VBox status code.
15468 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15469 * @param cbInstr The instruction length in bytes.
15470 * @thread EMT(pVCpu)
15471 */
15472VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15473{
15474 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15475
15476 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15477 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15478 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15479}
15480
15481
15482/**
15483 * Interface for HM and EM to emulate the STGI instruction.
15484 *
15485 * @returns Strict VBox status code.
15486 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15487 * @param cbInstr The instruction length in bytes.
15488 * @thread EMT(pVCpu)
15489 */
15490VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15491{
15492 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15493
15494 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15495 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15496 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15497}
15498
15499
15500/**
15501 * Interface for HM and EM to emulate the VMLOAD instruction.
15502 *
15503 * @returns Strict VBox status code.
15504 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15505 * @param cbInstr The instruction length in bytes.
15506 * @thread EMT(pVCpu)
15507 */
15508VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15509{
15510 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15511
15512 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15513 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15514 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15515}
15516
15517
15518/**
15519 * Interface for HM and EM to emulate the VMSAVE instruction.
15520 *
15521 * @returns Strict VBox status code.
15522 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15523 * @param cbInstr The instruction length in bytes.
15524 * @thread EMT(pVCpu)
15525 */
15526VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15527{
15528 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15529
15530 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15531 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15532 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15533}
15534
15535
15536/**
15537 * Interface for HM and EM to emulate the INVLPGA instruction.
15538 *
15539 * @returns Strict VBox status code.
15540 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15541 * @param cbInstr The instruction length in bytes.
15542 * @thread EMT(pVCpu)
15543 */
15544VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15545{
15546 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15547
15548 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15549 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15550 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15551}
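
/*
 * Example (illustrative sketch only, not part of the build): the five
 * nested-SVM interfaces above (CLGI, STGI, VMLOAD, VMSAVE, INVLPGA) all take
 * nothing but the instruction length, so an AMD-V exit handler can forward
 * them with a simple dispatcher.  The SVMINSN enum and helper are hypothetical.
 *
 *     typedef enum { kSvmInsn_Clgi, kSvmInsn_Stgi, kSvmInsn_Vmload, kSvmInsn_Vmsave, kSvmInsn_Invlpga } SVMINSN;
 *
 *     static VBOXSTRICTRC hmHypotheticalSvmCtrlInsn(PVMCPU pVCpu, SVMINSN enmInsn, uint8_t cbInstr)
 *     {
 *         switch (enmInsn)
 *         {
 *             case kSvmInsn_Clgi:    return IEMExecDecodedClgi(pVCpu, cbInstr);
 *             case kSvmInsn_Stgi:    return IEMExecDecodedStgi(pVCpu, cbInstr);
 *             case kSvmInsn_Vmload:  return IEMExecDecodedVmload(pVCpu, cbInstr);
 *             case kSvmInsn_Vmsave:  return IEMExecDecodedVmsave(pVCpu, cbInstr);
 *             case kSvmInsn_Invlpga: return IEMExecDecodedInvlpga(pVCpu, cbInstr);
 *             default:               return VERR_NOT_SUPPORTED;
 *         }
 *     }
 */
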
15552#endif /* VBOX_WITH_NESTED_HWVIRT */
15553
15554#ifdef IN_RING3
15555
15556/**
15557 * Handles the unlikely and probably fatal merge cases.
15558 *
15559 * @returns Merged status code.
15560 * @param rcStrict Current EM status code.
15561 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15562 * with @a rcStrict.
15563 * @param iMemMap The memory mapping index. For error reporting only.
15564 * @param pVCpu The cross context virtual CPU structure of the calling
15565 * thread, for error reporting only.
15566 */
15567DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15568 unsigned iMemMap, PVMCPU pVCpu)
15569{
15570 if (RT_FAILURE_NP(rcStrict))
15571 return rcStrict;
15572
15573 if (RT_FAILURE_NP(rcStrictCommit))
15574 return rcStrictCommit;
15575
15576 if (rcStrict == rcStrictCommit)
15577 return rcStrictCommit;
15578
15579 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15580 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15581 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15582 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15583 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15584 return VERR_IOM_FF_STATUS_IPE;
15585}
15586
15587
15588/**
15589 * Helper for IOMR3ProcessForceFlag.
15590 *
15591 * @returns Merged status code.
15592 * @param rcStrict Current EM status code.
15593 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15594 * with @a rcStrict.
15595 * @param iMemMap The memory mapping index. For error reporting only.
15596 * @param pVCpu The cross context virtual CPU structure of the calling
15597 * thread, for error reporting only.
15598 */
15599DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15600{
15601 /* Simple. */
15602 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15603 return rcStrictCommit;
15604
15605 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15606 return rcStrict;
15607
15608 /* EM scheduling status codes. */
15609 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15610 && rcStrict <= VINF_EM_LAST))
15611 {
15612 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15613 && rcStrictCommit <= VINF_EM_LAST))
15614 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15615 }
15616
15617 /* Unlikely */
15618 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15619}
15620
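/*
 * Worked illustration of the merge rules above (a sketch, assuming a valid
 * pVCpu is in scope for the error-reporting parameters and that the two
 * hypothetical scheduling codes fall inside the EM range):
 *
 *     VBOXSTRICTRC const rcHighPrio = VINF_EM_FIRST + 1;   // smaller value = higher priority
 *     VBOXSTRICTRC const rcLowPrio  = VINF_EM_FIRST + 2;
 *
 *     // Both in the EM range: the higher priority (numerically smaller) one wins.
 *     Assert(iemR3MergeStatus(rcLowPrio, rcHighPrio, 0, pVCpu) == rcHighPrio);
 *
 *     // VINF_SUCCESS on the EM side: the commit status is simply adopted.
 *     Assert(iemR3MergeStatus(VINF_SUCCESS, rcLowPrio, 0, pVCpu) == rcLowPrio);
 */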
15621
15622/**
15623 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15624 *
15625 * @returns Merge between @a rcStrict and what the commit operation returned.
15626 * @param pVM The cross context VM structure.
15627 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15628 * @param rcStrict The status code returned by ring-0 or raw-mode.
15629 */
15630VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15631{
15632 /*
15633 * Reset the pending commit.
15634 */
15635 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15636 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15637 ("%#x %#x %#x\n",
15638 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15639 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15640
15641 /*
15642 * Commit the pending bounce buffers (usually just one).
15643 */
15644 unsigned cBufs = 0;
15645 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15646 while (iMemMap-- > 0)
15647 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15648 {
15649 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15650 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15651 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15652
15653 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15654 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15655 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15656
15657 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15658 {
15659 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15660 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15661 pbBuf,
15662 cbFirst,
15663 PGMACCESSORIGIN_IEM);
15664 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
15665 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
15666 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
15667 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
15668 }
15669
15670 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
15671 {
15672 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
15673 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
15674 pbBuf + cbFirst,
15675 cbSecond,
15676 PGMACCESSORIGIN_IEM);
15677 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
15678 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
15679 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
15680 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
15681 }
15682 cBufs++;
15683 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
15684 }
15685
15686 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
15687 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
15688 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15689 pVCpu->iem.s.cActiveMappings = 0;
15690 return rcStrict;
15691}
15692
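/*
 * Example (illustrative sketch only, not part of the build): how the ring-3
 * caller side might look.  The force-flag test macro shown is an assumption;
 * VMCPU_FF_IEM itself is the flag this function clears.
 *
 *     if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */
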
15693#endif /* IN_RING3 */
15694