VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@66886

Last change on this file since 66886 was 66886, checked in by vboxsync, 8 years ago

IEM: Implemented vmovups Vps,Wps (VEX.0f 10)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 620.4 KB
1/* $Id: IEMAll.cpp 66886 2017-05-15 09:20:40Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed, as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/hm_svm.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#ifdef IEM_VERIFICATION_MODE_FULL
118# include <VBox/vmm/rem.h>
119# include <VBox/vmm/mm.h>
120#endif
121#include <VBox/vmm/vm.h>
122#include <VBox/log.h>
123#include <VBox/err.h>
124#include <VBox/param.h>
125#include <VBox/dis.h>
126#include <VBox/disopcode.h>
127#include <iprt/assert.h>
128#include <iprt/string.h>
129#include <iprt/x86.h>
130
131
132/*********************************************************************************************************************************
133* Structures and Typedefs *
134*********************************************************************************************************************************/
135/** @typedef PFNIEMOP
136 * Pointer to an opcode decoder function.
137 */
138
139/** @def FNIEMOP_DEF
140 * Define an opcode decoder function.
141 *
142 * We're using macros for this so that adding and removing parameters as well as
143 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
144 *
145 * @param a_Name The function name.
146 */
147
148/** @typedef PFNIEMOPRM
149 * Pointer to an opcode decoder function with RM byte.
150 */
151
152/** @def FNIEMOPRM_DEF
153 * Define an opcode decoder function with RM byte.
154 *
155 * We're using macros for this so that adding and removing parameters as well as
156 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
157 *
158 * @param a_Name The function name.
159 */
160
161#if defined(__GNUC__) && defined(RT_ARCH_X86)
162typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
164# define FNIEMOP_DEF(a_Name) \
165 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
170
171#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
172typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
174# define FNIEMOP_DEF(a_Name) \
175 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
176# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
177 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
178# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
179 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
180
181#elif defined(__GNUC__)
182typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
183typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
184# define FNIEMOP_DEF(a_Name) \
185 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
186# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
187 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
188# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
189 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
190
191#else
192typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
193typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
194# define FNIEMOP_DEF(a_Name) \
195 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
196# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
197 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
198# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
199 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
200
201#endif
202#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
203
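/*
 * Illustrative sketch only (iemOp_IllustrativeNop and iemDispatchIllustrative
 * are hypothetical names, not entries in the real decoder tables): this is the
 * pattern the FNIEMOP_DEF / PFNIEMOP / FNIEMOP_CALL trio is meant for.
 */
#if 0
FNIEMOP_DEF(iemOp_IllustrativeNop)
{
    /* A real decoder would fetch further opcode/operand bytes here and then
       hand off to an IEM_MC_* block or a C implementation worker. */
    Log4(("iemOp_IllustrativeNop: decoded\n"));
    return VINF_SUCCESS;
}

IEM_STATIC VBOXSTRICTRC iemDispatchIllustrative(PVMCPU pVCpu)
{
    PFNIEMOP const pfnOp = iemOp_IllustrativeNop; /* normally looked up in g_apfnOneByteMap & friends */
    return FNIEMOP_CALL(pfnOp);                   /* expands to pfnOp(pVCpu), matching FNIEMOP_DEF */
}
#endif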
204
205/**
206 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
207 */
208typedef union IEMSELDESC
209{
210 /** The legacy view. */
211 X86DESC Legacy;
212 /** The long mode view. */
213 X86DESC64 Long;
214} IEMSELDESC;
215/** Pointer to a selector descriptor table entry. */
216typedef IEMSELDESC *PIEMSELDESC;
217
218/**
219 * CPU exception classes.
220 */
221typedef enum IEMXCPTCLASS
222{
223 IEMXCPTCLASS_BENIGN,
224 IEMXCPTCLASS_CONTRIBUTORY,
225 IEMXCPTCLASS_PAGE_FAULT
226} IEMXCPTCLASS;
227
228
229/*********************************************************************************************************************************
230* Defined Constants And Macros *
231*********************************************************************************************************************************/
232/** @def IEM_WITH_SETJMP
233 * Enables alternative status code handling using setjmps.
234 *
235 * This adds a bit of expense via the setjmp() call since it saves all the
236 * non-volatile registers. However, it eliminates return code checks and allows
237 * for more optimal return value passing (return regs instead of stack buffer).
238 */
239#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
240# define IEM_WITH_SETJMP
241#endif
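/*
 * Minimal, generic sketch of the difference (plain setjmp/longjmp with
 * hypothetical iemExample* helpers -- not the actual IEM plumbing, where the
 * jump buffer lives in the per-VCpu state): with status codes every caller
 * must check and propagate a VBOXSTRICTRC, whereas with setjmp the innermost
 * failure site can unwind straight back to the instruction driver.
 */
#if 0
# include <setjmp.h>

static jmp_buf g_ExampleJmpBuf;

static uint8_t iemExampleFetchU8(bool fFail)
{
    if (fFail)
        longjmp(g_ExampleJmpBuf, VERR_IEM_IPE_9); /* no return-code plumbing on the way out */
    return 0x90; /* nop */
}

static VBOXSTRICTRC iemExampleDriver(bool fFail)
{
    int rc = setjmp(g_ExampleJmpBuf); /* 0 on the direct call, the longjmp status on unwind */
    if (rc == 0)
    {
        uint8_t bOpcode = iemExampleFetchU8(fFail); /* may longjmp */
        NOREF(bOpcode);
        return VINF_SUCCESS;
    }
    return rc;
}
#endif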
242
243/** Temporary hack to disable the double execution. Will be removed in favor
244 * of a dedicated execution mode in EM. */
245//#define IEM_VERIFICATION_MODE_NO_REM
246
247/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
248 * due to GCC lacking knowledge about the value range of a switch. */
249#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
250
251/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
252#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
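/*
 * Illustrative sketch (hypothetical helper, not part of the real decoder): the
 * macro supplies the 'default:' label itself, so it simply drops into an
 * otherwise exhaustive switch and asserts/returns if ever reached.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleSwitchOnMode(IEMMODE enmMode)
{
    switch (enmMode)
    {
        case IEMMODE_16BIT: return VINF_SUCCESS;
        case IEMMODE_32BIT: return VINF_SUCCESS;
        case IEMMODE_64BIT: return VINF_SUCCESS;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
#endif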
253
254/**
255 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
256 * occasion.
257 */
258#ifdef LOG_ENABLED
259# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
260 do { \
261 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
262 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
263 } while (0)
264#else
265# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
266 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
267#endif
268
269/**
270 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
271 * occasion using the supplied logger statement.
272 *
273 * @param a_LoggerArgs What to log on failure.
274 */
275#ifdef LOG_ENABLED
276# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
277 do { \
278 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
279 /*LogFunc(a_LoggerArgs);*/ \
280 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
281 } while (0)
282#else
283# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
284 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
285#endif
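/*
 * Illustrative sketch (hypothetical stub): bail out of a decoder on an aspect
 * IEM does not implement, logging why via the variant above.
 */
#if 0
FNIEMOP_DEF(iemOp_IllustrativeUnimplemented)
{
    if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
        IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("illustrative aspect not implemented in 64-bit mode\n"));
    return VINF_SUCCESS;
}
#endif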
286
287/**
288 * Call an opcode decoder function.
289 *
290 * We're using macros for this so that adding and removing parameters can be
291 * done as we please. See FNIEMOP_DEF.
292 */
293#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
294
295/**
296 * Call a common opcode decoder function taking one extra argument.
297 *
298 * We're using macros for this so that adding and removing parameters can be
299 * done as we please. See FNIEMOP_DEF_1.
300 */
301#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
302
303/**
304 * Call a common opcode decoder function taking two extra arguments.
305 *
306 * We're using macros for this so that adding and removing parameters can be
307 * done as we please. See FNIEMOP_DEF_2.
308 */
309#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
310
311/**
312 * Check if we're currently executing in real or virtual 8086 mode.
313 *
314 * @returns @c true if it is, @c false if not.
315 * @param a_pVCpu The IEM state of the current CPU.
316 */
317#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
318
319/**
320 * Check if we're currently executing in virtual 8086 mode.
321 *
322 * @returns @c true if it is, @c false if not.
323 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
324 */
325#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
326
327/**
328 * Check if we're currently executing in long mode.
329 *
330 * @returns @c true if it is, @c false if not.
331 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
332 */
333#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
334
335/**
336 * Check if we're currently executing in real mode.
337 *
338 * @returns @c true if it is, @c false if not.
339 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
340 */
341#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
342
343/**
344 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
345 * @returns PCCPUMFEATURES
346 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
347 */
348#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
349
350/**
351 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
352 * @returns PCCPUMFEATURES
353 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
354 */
355#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
356
357/**
358 * Evaluates to true if we're presenting an Intel CPU to the guest.
359 */
360#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
361
362/**
363 * Evaluates to true if we're presenting an AMD CPU to the guest.
364 */
365#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
366
367/**
368 * Check if the address is canonical.
369 */
370#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
371
372/** @def IEM_USE_UNALIGNED_DATA_ACCESS
373 * Use unaligned accesses instead of elaborate byte assembly. */
374#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
375# define IEM_USE_UNALIGNED_DATA_ACCESS
376#endif
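/*
 * Minimal sketch of what the define controls (generic helper with a
 * hypothetical name, not one of the actual IEM fetch/store workers): on
 * x86/AMD64 a single unaligned access is cheap and safe, elsewhere the value
 * is assembled byte by byte to avoid alignment traps.
 */
#if 0
DECLINLINE(uint32_t) iemExampleReadU32(uint8_t const *pb)
{
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    return *(uint32_t const *)pb;
# else
    return (uint32_t)pb[0]
         | ((uint32_t)pb[1] << 8)
         | ((uint32_t)pb[2] << 16)
         | ((uint32_t)pb[3] << 24); /* little-endian byte assembly */
# endif
}
#endif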
377
378#ifdef VBOX_WITH_NESTED_HWVIRT
379/**
380 * Check the common SVM instruction preconditions.
381 */
382# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
383 do { \
384 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
385 { \
386 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
387 return iemRaiseUndefinedOpcode(pVCpu); \
388 } \
389 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
390 { \
391 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
392 return iemRaiseUndefinedOpcode(pVCpu); \
393 } \
394 if (pVCpu->iem.s.uCpl != 0) \
395 { \
396 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
397 return iemRaiseGeneralProtectionFault0(pVCpu); \
398 } \
399 } while (0)
400
401/**
402 * Check if SVM is enabled.
403 */
404# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
405
406/**
407 * Check if an SVM control/instruction intercept is set.
408 */
409# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
410
411/**
412 * Check if an SVM read CRx intercept is set.
413 */
414# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
415
416/**
417 * Check if an SVM write CRx intercept is set.
418 */
419# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
420
421/**
422 * Check if an SVM read DRx intercept is set.
423 */
424# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
425
426/**
427 * Check if an SVM write DRx intercept is set.
428 */
429# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
430
431/**
432 * Check if an SVM exception intercept is set.
433 */
434# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uVector)))
435
436/**
437 * Invokes the SVM \#VMEXIT handler for the nested-guest.
438 */
439# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
440 do \
441 { \
442 VBOXSTRICTRC rcStrictVmExit = HMSvmNstGstVmExit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), \
443 (a_uExitInfo2)); \
444 return rcStrictVmExit == VINF_SVM_VMEXIT ? VINF_SUCCESS : rcStrictVmExit; \
445 } while (0)
446
447/**
448 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
449 * corresponding decode assist information.
450 */
451# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
452 do \
453 { \
454 uint64_t uExitInfo1; \
455 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssist \
456 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
457 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
458 else \
459 uExitInfo1 = 0; \
460 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
461 } while (0)
462
463/**
464 * Checks and handles an SVM MSR intercept.
465 */
466# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) \
467 HMSvmNstGstHandleMsrIntercept((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_idMsr), (a_fWrite))
468
469#else
470# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
471# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
472# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
473# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
474# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
475# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
476# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
477# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
478# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
479# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
480# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) (VERR_SVM_IPE_1)
481
482#endif /* VBOX_WITH_NESTED_HWVIRT */
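/*
 * Illustrative sketch of how the SVM helpers above combine in a decoder
 * (hypothetical stub; the intercept bit and exit code below are placeholders,
 * not the values a real SVM instruction would use):
 */
#if 0
FNIEMOP_DEF(iemOp_IllustrativeSvmInstr)
{
    /* Common EFER.SVME / mode / CPL checks; raises #UD or #GP(0) as needed. */
    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, illustrative);

    /* If the nested-guest hypervisor intercepts the instruction, emit a #VMEXIT. */
    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT /* placeholder intercept */))
        IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, 0 /* placeholder exit code */, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);

    IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
}
#endif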
483
484
485/*********************************************************************************************************************************
486* Global Variables *
487*********************************************************************************************************************************/
488extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
489
490
491/** Function table for the ADD instruction. */
492IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
493{
494 iemAImpl_add_u8, iemAImpl_add_u8_locked,
495 iemAImpl_add_u16, iemAImpl_add_u16_locked,
496 iemAImpl_add_u32, iemAImpl_add_u32_locked,
497 iemAImpl_add_u64, iemAImpl_add_u64_locked
498};
499
500/** Function table for the ADC instruction. */
501IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
502{
503 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
504 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
505 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
506 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
507};
508
509/** Function table for the SUB instruction. */
510IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
511{
512 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
513 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
514 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
515 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
516};
517
518/** Function table for the SBB instruction. */
519IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
520{
521 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
522 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
523 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
524 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
525};
526
527/** Function table for the OR instruction. */
528IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
529{
530 iemAImpl_or_u8, iemAImpl_or_u8_locked,
531 iemAImpl_or_u16, iemAImpl_or_u16_locked,
532 iemAImpl_or_u32, iemAImpl_or_u32_locked,
533 iemAImpl_or_u64, iemAImpl_or_u64_locked
534};
535
536/** Function table for the XOR instruction. */
537IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
538{
539 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
540 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
541 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
542 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
543};
544
545/** Function table for the AND instruction. */
546IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
547{
548 iemAImpl_and_u8, iemAImpl_and_u8_locked,
549 iemAImpl_and_u16, iemAImpl_and_u16_locked,
550 iemAImpl_and_u32, iemAImpl_and_u32_locked,
551 iemAImpl_and_u64, iemAImpl_and_u64_locked
552};
553
554/** Function table for the CMP instruction.
555 * @remarks Making operand order ASSUMPTIONS.
556 */
557IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
558{
559 iemAImpl_cmp_u8, NULL,
560 iemAImpl_cmp_u16, NULL,
561 iemAImpl_cmp_u32, NULL,
562 iemAImpl_cmp_u64, NULL
563};
564
565/** Function table for the TEST instruction.
566 * @remarks Making operand order ASSUMPTIONS.
567 */
568IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
569{
570 iemAImpl_test_u8, NULL,
571 iemAImpl_test_u16, NULL,
572 iemAImpl_test_u32, NULL,
573 iemAImpl_test_u64, NULL
574};
575
576/** Function table for the BT instruction. */
577IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
578{
579 NULL, NULL,
580 iemAImpl_bt_u16, NULL,
581 iemAImpl_bt_u32, NULL,
582 iemAImpl_bt_u64, NULL
583};
584
585/** Function table for the BTC instruction. */
586IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
587{
588 NULL, NULL,
589 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
590 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
591 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
592};
593
594/** Function table for the BTR instruction. */
595IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
596{
597 NULL, NULL,
598 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
599 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
600 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
601};
602
603/** Function table for the BTS instruction. */
604IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
605{
606 NULL, NULL,
607 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
608 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
609 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
610};
611
612/** Function table for the BSF instruction. */
613IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
614{
615 NULL, NULL,
616 iemAImpl_bsf_u16, NULL,
617 iemAImpl_bsf_u32, NULL,
618 iemAImpl_bsf_u64, NULL
619};
620
621/** Function table for the BSR instruction. */
622IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
623{
624 NULL, NULL,
625 iemAImpl_bsr_u16, NULL,
626 iemAImpl_bsr_u32, NULL,
627 iemAImpl_bsr_u64, NULL
628};
629
630/** Function table for the IMUL instruction. */
631IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
632{
633 NULL, NULL,
634 iemAImpl_imul_two_u16, NULL,
635 iemAImpl_imul_two_u32, NULL,
636 iemAImpl_imul_two_u64, NULL
637};
638
639/** Group 1 /r lookup table. */
640IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
641{
642 &g_iemAImpl_add,
643 &g_iemAImpl_or,
644 &g_iemAImpl_adc,
645 &g_iemAImpl_sbb,
646 &g_iemAImpl_and,
647 &g_iemAImpl_sub,
648 &g_iemAImpl_xor,
649 &g_iemAImpl_cmp
650};
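/*
 * Illustrative sketch (hypothetical helper): the group-1 table above is
 * indexed with the reg/opcode field, bits 5:3 of the ModR/M byte, selecting
 * between ADD, OR, ADC, SBB, AND, SUB, XOR and CMP.
 */
#if 0
DECLINLINE(PCIEMOPBINSIZES) iemExampleLookupGrp1(uint8_t bRm)
{
    return g_apIemImplGrp1[(bRm >> 3) & 7];
}
#endif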
651
652/** Function table for the INC instruction. */
653IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
654{
655 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
656 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
657 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
658 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
659};
660
661/** Function table for the DEC instruction. */
662IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
663{
664 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
665 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
666 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
667 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
668};
669
670/** Function table for the NEG instruction. */
671IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
672{
673 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
674 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
675 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
676 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
677};
678
679/** Function table for the NOT instruction. */
680IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
681{
682 iemAImpl_not_u8, iemAImpl_not_u8_locked,
683 iemAImpl_not_u16, iemAImpl_not_u16_locked,
684 iemAImpl_not_u32, iemAImpl_not_u32_locked,
685 iemAImpl_not_u64, iemAImpl_not_u64_locked
686};
687
688
689/** Function table for the ROL instruction. */
690IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
691{
692 iemAImpl_rol_u8,
693 iemAImpl_rol_u16,
694 iemAImpl_rol_u32,
695 iemAImpl_rol_u64
696};
697
698/** Function table for the ROR instruction. */
699IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
700{
701 iemAImpl_ror_u8,
702 iemAImpl_ror_u16,
703 iemAImpl_ror_u32,
704 iemAImpl_ror_u64
705};
706
707/** Function table for the RCL instruction. */
708IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
709{
710 iemAImpl_rcl_u8,
711 iemAImpl_rcl_u16,
712 iemAImpl_rcl_u32,
713 iemAImpl_rcl_u64
714};
715
716/** Function table for the RCR instruction. */
717IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
718{
719 iemAImpl_rcr_u8,
720 iemAImpl_rcr_u16,
721 iemAImpl_rcr_u32,
722 iemAImpl_rcr_u64
723};
724
725/** Function table for the SHL instruction. */
726IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
727{
728 iemAImpl_shl_u8,
729 iemAImpl_shl_u16,
730 iemAImpl_shl_u32,
731 iemAImpl_shl_u64
732};
733
734/** Function table for the SHR instruction. */
735IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
736{
737 iemAImpl_shr_u8,
738 iemAImpl_shr_u16,
739 iemAImpl_shr_u32,
740 iemAImpl_shr_u64
741};
742
743/** Function table for the SAR instruction. */
744IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
745{
746 iemAImpl_sar_u8,
747 iemAImpl_sar_u16,
748 iemAImpl_sar_u32,
749 iemAImpl_sar_u64
750};
751
752
753/** Function table for the MUL instruction. */
754IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
755{
756 iemAImpl_mul_u8,
757 iemAImpl_mul_u16,
758 iemAImpl_mul_u32,
759 iemAImpl_mul_u64
760};
761
762/** Function table for the IMUL instruction working implicitly on rAX. */
763IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
764{
765 iemAImpl_imul_u8,
766 iemAImpl_imul_u16,
767 iemAImpl_imul_u32,
768 iemAImpl_imul_u64
769};
770
771/** Function table for the DIV instruction. */
772IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
773{
774 iemAImpl_div_u8,
775 iemAImpl_div_u16,
776 iemAImpl_div_u32,
777 iemAImpl_div_u64
778};
779
780/** Function table for the IDIV instruction. */
781IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
782{
783 iemAImpl_idiv_u8,
784 iemAImpl_idiv_u16,
785 iemAImpl_idiv_u32,
786 iemAImpl_idiv_u64
787};
788
789/** Function table for the SHLD instruction */
790IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
791{
792 iemAImpl_shld_u16,
793 iemAImpl_shld_u32,
794 iemAImpl_shld_u64,
795};
796
797/** Function table for the SHRD instruction */
798IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
799{
800 iemAImpl_shrd_u16,
801 iemAImpl_shrd_u32,
802 iemAImpl_shrd_u64,
803};
804
805
806/** Function table for the PUNPCKLBW instruction */
807IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
808/** Function table for the PUNPCKLWD instruction */
809IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
810/** Function table for the PUNPCKLDQ instruction */
811IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
812/** Function table for the PUNPCKLQDQ instruction */
813IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
814
815/** Function table for the PUNPCKHBW instruction */
816IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
817/** Function table for the PUNPCKHWD instruction */
818IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
819/** Function table for the PUNPCKHDQ instruction */
820IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
821/** Function table for the PUNPCKHQDQ instruction */
822IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
823
824/** Function table for the PXOR instruction */
825IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
826/** Function table for the PCMPEQB instruction */
827IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
828/** Function table for the PCMPEQW instruction */
829IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
830/** Function table for the PCMPEQD instruction */
831IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
832
833
834#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
835/** What IEM just wrote. */
836uint8_t g_abIemWrote[256];
837/** How much IEM just wrote. */
838size_t g_cbIemWrote;
839#endif
840
841
842/*********************************************************************************************************************************
843* Internal Functions *
844*********************************************************************************************************************************/
845IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
846IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
847IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
848IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
849/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
850IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
851IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
852IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
853IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
854IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
855IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
856IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
857IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
858IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
859IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
860IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
861IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
862#ifdef IEM_WITH_SETJMP
863DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
864DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
865DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
866DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
867DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
868#endif
869
870IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
871IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
872IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
873IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
874IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
875IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
876IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
877IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
878IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
879IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
880IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
881IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
882IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
883IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
884IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
885IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
886
887#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
888IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
889#endif
890IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
891IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
892
893#ifdef VBOX_WITH_NESTED_HWVIRT
894/**
895 * Checks if the intercepted IO instruction causes a \#VMEXIT and handles it
896 * accordingly.
897 *
898 * @returns VBox strict status code.
899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
900 * @param u16Port The IO port being accessed.
901 * @param enmIoType The type of IO access.
902 * @param cbReg The IO operand size in bytes.
903 * @param cAddrSizeBits The address size in bits (16, 32 or 64).
904 * @param iEffSeg The effective segment number.
905 * @param fRep Whether this is a repeating IO instruction (REP prefix).
906 * @param fStrIo Whether this is a string IO instruction.
907 * @param cbInstr The length of the IO instruction in bytes.
908 *
909 * @remarks This must be called only when IO instructions are intercepted by the
910 * nested-guest hypervisor.
911 */
912IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
913 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
914{
915 Assert(IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT));
916 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
917 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
918
919 static const uint32_t s_auIoOpSize[] = { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
920 static const uint32_t s_auIoAddrSize[] = { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
921
922 SVMIOIOEXITINFO IoExitInfo;
923 IoExitInfo.u = s_auIoOpSize[cbReg & 7];
924 IoExitInfo.u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
925 IoExitInfo.n.u1STR = fStrIo;
926 IoExitInfo.n.u1REP = fRep;
927 IoExitInfo.n.u3SEG = iEffSeg & 0x7;
928 IoExitInfo.n.u1Type = enmIoType;
929 IoExitInfo.n.u16Port = u16Port;
930
931 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
932 return HMSvmNstGstHandleIOIntercept(pVCpu, pCtx, &IoExitInfo, pCtx->rip + cbInstr);
933}
934
935#else
936IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
937 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
938{
939 RT_NOREF9(pVCpu, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo, cbInstr);
940 return VERR_IEM_IPE_9;
941}
942#endif /* VBOX_WITH_NESTED_HWVIRT */
943
944
945/**
946 * Sets the pass up status.
947 *
948 * @returns VINF_SUCCESS.
949 * @param pVCpu The cross context virtual CPU structure of the
950 * calling thread.
951 * @param rcPassUp The pass up status. Must be informational.
952 * VINF_SUCCESS is not allowed.
953 */
954IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
955{
956 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
957
958 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
959 if (rcOldPassUp == VINF_SUCCESS)
960 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
961 /* If both are EM scheduling codes, use EM priority rules. */
962 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
963 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
964 {
965 if (rcPassUp < rcOldPassUp)
966 {
967 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
968 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
969 }
970 else
971 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
972 }
973 /* Override EM scheduling with specific status code. */
974 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
975 {
976 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
977 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
978 }
979 /* Don't override specific status code, first come first served. */
980 else
981 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
982 return VINF_SUCCESS;
983}
984
985
986/**
987 * Calculates the CPU mode.
988 *
989 * This is mainly for updating IEMCPU::enmCpuMode.
990 *
991 * @returns CPU mode.
992 * @param pCtx The register context for the CPU.
993 */
994DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
995{
996 if (CPUMIsGuestIn64BitCodeEx(pCtx))
997 return IEMMODE_64BIT;
998 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
999 return IEMMODE_32BIT;
1000 return IEMMODE_16BIT;
1001}
1002
1003
1004/**
1005 * Initializes the execution state.
1006 *
1007 * @param pVCpu The cross context virtual CPU structure of the
1008 * calling thread.
1009 * @param fBypassHandlers Whether to bypass access handlers.
1010 *
1011 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1012 * side-effects in strict builds.
1013 */
1014DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1015{
1016 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1017
1018 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1019
1020#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1021 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1022 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1023 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1024 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1025 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1026 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1027 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1028 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1029#endif
1030
1031#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1032 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1033#endif
1034 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1035 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1036#ifdef VBOX_STRICT
1037 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1038 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1039 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1040 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1041 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1042 pVCpu->iem.s.uRexReg = 127;
1043 pVCpu->iem.s.uRexB = 127;
1044 pVCpu->iem.s.uRexIndex = 127;
1045 pVCpu->iem.s.iEffSeg = 127;
1046 pVCpu->iem.s.idxPrefix = 127;
1047 pVCpu->iem.s.uVex3rdReg = 127;
1048 pVCpu->iem.s.uVexLength = 127;
1049 pVCpu->iem.s.fEvexStuff = 127;
1050 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1051# ifdef IEM_WITH_CODE_TLB
1052 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1053 pVCpu->iem.s.pbInstrBuf = NULL;
1054 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1055 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1056 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1057 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1058# else
1059 pVCpu->iem.s.offOpcode = 127;
1060 pVCpu->iem.s.cbOpcode = 127;
1061# endif
1062#endif
1063
1064 pVCpu->iem.s.cActiveMappings = 0;
1065 pVCpu->iem.s.iNextMapping = 0;
1066 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1067 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1068#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1069 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1070 && pCtx->cs.u64Base == 0
1071 && pCtx->cs.u32Limit == UINT32_MAX
1072 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1073 if (!pVCpu->iem.s.fInPatchCode)
1074 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1075#endif
1076
1077#ifdef IEM_VERIFICATION_MODE_FULL
1078 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1079 pVCpu->iem.s.fNoRem = true;
1080#endif
1081}
1082
1083
1084/**
1085 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1086 *
1087 * @param pVCpu The cross context virtual CPU structure of the
1088 * calling thread.
1089 */
1090DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1091{
1092 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1093#ifdef IEM_VERIFICATION_MODE_FULL
1094 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1095#endif
1096#ifdef VBOX_STRICT
1097# ifdef IEM_WITH_CODE_TLB
1098 NOREF(pVCpu);
1099# else
1100 pVCpu->iem.s.cbOpcode = 0;
1101# endif
1102#else
1103 NOREF(pVCpu);
1104#endif
1105}
1106
1107
1108/**
1109 * Initializes the decoder state.
1110 *
1111 * iemReInitDecoder is mostly a copy of this function.
1112 *
1113 * @param pVCpu The cross context virtual CPU structure of the
1114 * calling thread.
1115 * @param fBypassHandlers Whether to bypass access handlers.
1116 */
1117DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1118{
1119 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1120
1121 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1122
1123#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1124 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1125 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1126 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1127 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1128 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1129 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1130 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1131 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1132#endif
1133
1134#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1135 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1136#endif
1137 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1138#ifdef IEM_VERIFICATION_MODE_FULL
1139 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1140 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1141#endif
1142 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1143 pVCpu->iem.s.enmCpuMode = enmMode;
1144 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1145 pVCpu->iem.s.enmEffAddrMode = enmMode;
1146 if (enmMode != IEMMODE_64BIT)
1147 {
1148 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1149 pVCpu->iem.s.enmEffOpSize = enmMode;
1150 }
1151 else
1152 {
1153 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1154 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1155 }
1156 pVCpu->iem.s.fPrefixes = 0;
1157 pVCpu->iem.s.uRexReg = 0;
1158 pVCpu->iem.s.uRexB = 0;
1159 pVCpu->iem.s.uRexIndex = 0;
1160 pVCpu->iem.s.idxPrefix = 0;
1161 pVCpu->iem.s.uVex3rdReg = 0;
1162 pVCpu->iem.s.uVexLength = 0;
1163 pVCpu->iem.s.fEvexStuff = 0;
1164 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1165#ifdef IEM_WITH_CODE_TLB
1166 pVCpu->iem.s.pbInstrBuf = NULL;
1167 pVCpu->iem.s.offInstrNextByte = 0;
1168 pVCpu->iem.s.offCurInstrStart = 0;
1169# ifdef VBOX_STRICT
1170 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1171 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1172 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1173# endif
1174#else
1175 pVCpu->iem.s.offOpcode = 0;
1176 pVCpu->iem.s.cbOpcode = 0;
1177#endif
1178 pVCpu->iem.s.cActiveMappings = 0;
1179 pVCpu->iem.s.iNextMapping = 0;
1180 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1181 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1182#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1183 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1184 && pCtx->cs.u64Base == 0
1185 && pCtx->cs.u32Limit == UINT32_MAX
1186 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1187 if (!pVCpu->iem.s.fInPatchCode)
1188 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1189#endif
1190
1191#ifdef DBGFTRACE_ENABLED
1192 switch (enmMode)
1193 {
1194 case IEMMODE_64BIT:
1195 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1196 break;
1197 case IEMMODE_32BIT:
1198 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1199 break;
1200 case IEMMODE_16BIT:
1201 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1202 break;
1203 }
1204#endif
1205}
1206
1207
1208/**
1209 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1210 *
1211 * This is mostly a copy of iemInitDecoder.
1212 *
1213 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1214 */
1215DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1216{
1217 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1218
1219 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1220
1221#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1222 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1223 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1224 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1225 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1226 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1227 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1228 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1229 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1230#endif
1231
1232 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1233#ifdef IEM_VERIFICATION_MODE_FULL
1234 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1235 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1236#endif
1237 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1238 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1239 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1240 pVCpu->iem.s.enmEffAddrMode = enmMode;
1241 if (enmMode != IEMMODE_64BIT)
1242 {
1243 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1244 pVCpu->iem.s.enmEffOpSize = enmMode;
1245 }
1246 else
1247 {
1248 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1249 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1250 }
1251 pVCpu->iem.s.fPrefixes = 0;
1252 pVCpu->iem.s.uRexReg = 0;
1253 pVCpu->iem.s.uRexB = 0;
1254 pVCpu->iem.s.uRexIndex = 0;
1255 pVCpu->iem.s.idxPrefix = 0;
1256 pVCpu->iem.s.uVex3rdReg = 0;
1257 pVCpu->iem.s.uVexLength = 0;
1258 pVCpu->iem.s.fEvexStuff = 0;
1259 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1260#ifdef IEM_WITH_CODE_TLB
1261 if (pVCpu->iem.s.pbInstrBuf)
1262 {
1263 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1264 - pVCpu->iem.s.uInstrBufPc;
1265 if (off < pVCpu->iem.s.cbInstrBufTotal)
1266 {
1267 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1268 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1269 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1270 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1271 else
1272 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1273 }
1274 else
1275 {
1276 pVCpu->iem.s.pbInstrBuf = NULL;
1277 pVCpu->iem.s.offInstrNextByte = 0;
1278 pVCpu->iem.s.offCurInstrStart = 0;
1279 pVCpu->iem.s.cbInstrBuf = 0;
1280 pVCpu->iem.s.cbInstrBufTotal = 0;
1281 }
1282 }
1283 else
1284 {
1285 pVCpu->iem.s.offInstrNextByte = 0;
1286 pVCpu->iem.s.offCurInstrStart = 0;
1287 pVCpu->iem.s.cbInstrBuf = 0;
1288 pVCpu->iem.s.cbInstrBufTotal = 0;
1289 }
1290#else
1291 pVCpu->iem.s.cbOpcode = 0;
1292 pVCpu->iem.s.offOpcode = 0;
1293#endif
1294 Assert(pVCpu->iem.s.cActiveMappings == 0);
1295 pVCpu->iem.s.iNextMapping = 0;
1296 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1297 Assert(pVCpu->iem.s.fBypassHandlers == false);
1298#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1299 if (!pVCpu->iem.s.fInPatchCode)
1300 { /* likely */ }
1301 else
1302 {
1303 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1304 && pCtx->cs.u64Base == 0
1305 && pCtx->cs.u32Limit == UINT32_MAX
1306 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1307 if (!pVCpu->iem.s.fInPatchCode)
1308 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1309 }
1310#endif
1311
1312#ifdef DBGFTRACE_ENABLED
1313 switch (enmMode)
1314 {
1315 case IEMMODE_64BIT:
1316 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1317 break;
1318 case IEMMODE_32BIT:
1319 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1320 break;
1321 case IEMMODE_16BIT:
1322 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1323 break;
1324 }
1325#endif
1326}
1327
1328
1329
1330/**
1331 * Prefetches opcodes the first time execution is started.
1332 *
1333 * @returns Strict VBox status code.
1334 * @param pVCpu The cross context virtual CPU structure of the
1335 * calling thread.
1336 * @param fBypassHandlers Whether to bypass access handlers.
1337 */
1338IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1339{
1340#ifdef IEM_VERIFICATION_MODE_FULL
1341 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1342#endif
1343 iemInitDecoder(pVCpu, fBypassHandlers);
1344
1345#ifdef IEM_WITH_CODE_TLB
1346 /** @todo Do ITLB lookup here. */
1347
1348#else /* !IEM_WITH_CODE_TLB */
1349
1350 /*
1351 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1352 *
1353 * First translate CS:rIP to a physical address.
1354 */
1355 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1356 uint32_t cbToTryRead;
1357 RTGCPTR GCPtrPC;
1358 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1359 {
1360 cbToTryRead = PAGE_SIZE;
1361 GCPtrPC = pCtx->rip;
1362 if (IEM_IS_CANONICAL(GCPtrPC))
1363 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1364 else
1365 return iemRaiseGeneralProtectionFault0(pVCpu);
1366 }
1367 else
1368 {
1369 uint32_t GCPtrPC32 = pCtx->eip;
1370 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1371 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1372 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1373 else
1374 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1375 if (cbToTryRead) { /* likely */ }
1376 else /* overflowed */
1377 {
1378 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1379 cbToTryRead = UINT32_MAX;
1380 }
1381 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1382 Assert(GCPtrPC <= UINT32_MAX);
1383 }
1384
1385# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1386 /* Allow interpretation of patch manager code blocks since they can for
1387 instance throw #PFs for perfectly good reasons. */
1388 if (pVCpu->iem.s.fInPatchCode)
1389 {
1390 size_t cbRead = 0;
1391 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1392 AssertRCReturn(rc, rc);
1393 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1394 return VINF_SUCCESS;
1395 }
1396# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1397
1398 RTGCPHYS GCPhys;
1399 uint64_t fFlags;
1400 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1401 if (RT_SUCCESS(rc)) { /* probable */ }
1402 else
1403 {
1404 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1405 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1406 }
1407 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1408 else
1409 {
1410 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1411 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1412 }
1413 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1414 else
1415 {
1416 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1417 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1418 }
1419 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1420 /** @todo Check reserved bits and such stuff. PGM is better at doing
1421 * that, so do it when implementing the guest virtual address
1422 * TLB... */
1423
1424# ifdef IEM_VERIFICATION_MODE_FULL
1425 /*
1426 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1427 * instruction.
1428 */
1429 /** @todo optimize this differently by not using PGMPhysRead. */
1430 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1431 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1432 if ( offPrevOpcodes < cbOldOpcodes
1433 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1434 {
1435 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1436 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1437 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1438 pVCpu->iem.s.cbOpcode = cbNew;
1439 return VINF_SUCCESS;
1440 }
1441# endif
1442
1443 /*
1444 * Read the bytes at this address.
1445 */
1446 PVM pVM = pVCpu->CTX_SUFF(pVM);
1447# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1448 size_t cbActual;
1449 if ( PATMIsEnabled(pVM)
1450 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1451 {
1452 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1453 Assert(cbActual > 0);
1454 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1455 }
1456 else
1457# endif
1458 {
1459 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1460 if (cbToTryRead > cbLeftOnPage)
1461 cbToTryRead = cbLeftOnPage;
1462 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1463 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1464
1465 if (!pVCpu->iem.s.fBypassHandlers)
1466 {
1467 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1468 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1469 { /* likely */ }
1470 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1471 {
1472 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1473 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1474 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1475 }
1476 else
1477 {
1478 Log((RT_SUCCESS(rcStrict)
1479 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1480 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1481 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1482 return rcStrict;
1483 }
1484 }
1485 else
1486 {
1487 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1488 if (RT_SUCCESS(rc))
1489 { /* likely */ }
1490 else
1491 {
1492 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1493 GCPtrPC, GCPhys, cbToTryRead, rc));
1494 return rc;
1495 }
1496 }
1497 pVCpu->iem.s.cbOpcode = cbToTryRead;
1498 }
1499#endif /* !IEM_WITH_CODE_TLB */
1500 return VINF_SUCCESS;
1501}
1502
1503
1504/**
1505 * Invalidates the IEM TLBs.
1506 *
1507 * This is called internally as well as by PGM when moving GC mappings.
1508 *
1510 * @param pVCpu The cross context virtual CPU structure of the calling
1511 * thread.
1512 * @param fVmm Set when PGM calls us with a remapping.
1513 */
1514VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1515{
1516#ifdef IEM_WITH_CODE_TLB
1517 pVCpu->iem.s.cbInstrBufTotal = 0;
1518 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1519 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1520 { /* very likely */ }
1521 else
1522 {
1523 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1524 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1525 while (i-- > 0)
1526 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1527 }
1528#endif
1529
1530#ifdef IEM_WITH_DATA_TLB
1531 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1532 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1533 { /* very likely */ }
1534 else
1535 {
1536 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1537 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1538 while (i-- > 0)
1539 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1540 }
1541#endif
1542 NOREF(pVCpu); NOREF(fVmm);
1543}
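/* Note! Example of the revision trick above (illustration only; pTlb, GCPtrPage
 *       and fHit are placeholder names, the real lookup lives in the opcode and
 *       memory fetch code):
 *           uint64_t const uTag  = (GCPtrPage >> X86_PAGE_SHIFT) | pTlb->uTlbRevision;
 *           PIEMTLBENTRY   pTlbe = &pTlb->aEntries[(uint8_t)uTag];
 *           bool const     fHit  = pTlbe->uTag == uTag;
 *       Every stored tag carries the revision it was inserted under, so bumping
 *       uTlbRevision makes all old entries miss without touching them; only on
 *       the rare wrap-around to zero are the 256 tags cleared explicitly. */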
1544
1545
1546/**
1547 * Invalidates a page in the TLBs.
1548 *
1549 * @param pVCpu The cross context virtual CPU structure of the calling
1550 * thread.
1551 * @param GCPtr The address of the page to invalidate.
1552 */
1553VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1554{
1555#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1556 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1557 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1558 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1559 uintptr_t idx = (uint8_t)GCPtr;
1560
1561# ifdef IEM_WITH_CODE_TLB
1562 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1563 {
1564 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1565 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1566 pVCpu->iem.s.cbInstrBufTotal = 0;
1567 }
1568# endif
1569
1570# ifdef IEM_WITH_DATA_TLB
1571 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1572 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1573# endif
1574#else
1575 NOREF(pVCpu); NOREF(GCPtr);
1576#endif
1577}
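/* Note! Worked example for the lookup above (illustration only): with
 *       GCPtr = 0x00007fff12345678 we get GCPtr >> X86_PAGE_SHIFT = 0x7fff12345,
 *       so the entry index is the low byte 0x45 and the tag compared against is
 *       0x7fff12345 | uTlbRevision.  Each TLB is thus a simple 256-entry,
 *       direct-mapped cache keyed on bits 12 thru 19 of the virtual address. */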
1578
1579
1580/**
1581 * Invalidates the host physical aspects of the IEM TLBs.
1582 *
1583 * This is called internally as well as by PGM when moving GC mappings.
1584 *
1585 * @param pVCpu The cross context virtual CPU structure of the calling
1586 * thread.
1587 */
1588VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1589{
1590#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1591 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1592
1593# ifdef IEM_WITH_CODE_TLB
1594 pVCpu->iem.s.cbInstrBufTotal = 0;
1595# endif
1596 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1597 if (uTlbPhysRev != 0)
1598 {
1599 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1600 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1601 }
1602 else
1603 {
1604 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1605 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1606
1607 unsigned i;
1608# ifdef IEM_WITH_CODE_TLB
1609 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1610 while (i-- > 0)
1611 {
1612 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1613 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1614 }
1615# endif
1616# ifdef IEM_WITH_DATA_TLB
1617 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1618 while (i-- > 0)
1619 {
1620 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1621 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1622 }
1623# endif
1624 }
1625#else
1626 NOREF(pVCpu);
1627#endif
1628}
1629
1630
1631/**
1632 * Invalidates the host physical aspects of the IEM TLBs on all VCPUs.
1633 *
1634 * This is called internally as well as by PGM when moving GC mappings.
1635 *
1636 * @param pVM The cross context VM structure.
1637 *
1638 * @remarks Caller holds the PGM lock.
1639 */
1640VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1641{
1642 RT_NOREF_PV(pVM);
1643}
1644
1645#ifdef IEM_WITH_CODE_TLB
1646
1647/**
1648 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1649 * failure and jumps.
1650 *
1651 * We end up here for a number of reasons:
1652 * - pbInstrBuf isn't yet initialized.
1653 * - Advancing beyond the buffer boundary (e.g. cross page).
1654 * - Advancing beyond the CS segment limit.
1655 * - Fetching from non-mappable page (e.g. MMIO).
1656 *
1657 * @param pVCpu The cross context virtual CPU structure of the
1658 * calling thread.
1659 * @param pvDst Where to return the bytes.
1660 * @param cbDst Number of bytes to read.
1661 *
1662 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1663 */
1664IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1665{
1666#ifdef IN_RING3
1667//__debugbreak();
1668 for (;;)
1669 {
1670 Assert(cbDst <= 8);
1671 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1672
1673 /*
1674 * We might have a partial buffer match, deal with that first to make the
1675 * rest simpler. This is the first part of the cross page/buffer case.
1676 */
1677 if (pVCpu->iem.s.pbInstrBuf != NULL)
1678 {
1679 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1680 {
1681 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1682 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1683 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1684
1685 cbDst -= cbCopy;
1686 pvDst = (uint8_t *)pvDst + cbCopy;
1687 offBuf += cbCopy;
1688 pVCpu->iem.s.offInstrNextByte = offBuf; /* advance past the bytes just copied */
1689 }
1690 }
1691
1692 /*
1693 * Check segment limit, figuring how much we're allowed to access at this point.
1694 *
1695 * We will fault immediately if RIP is past the segment limit / in non-canonical
1696 * territory. If we do continue, there are one or more bytes to read before we
1697 * end up in trouble and we need to do that first before faulting.
1698 */
1699 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1700 RTGCPTR GCPtrFirst;
1701 uint32_t cbMaxRead;
1702 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1703 {
1704 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1705 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1706 { /* likely */ }
1707 else
1708 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1709 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1710 }
1711 else
1712 {
1713 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1714 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1715 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1716 { /* likely */ }
1717 else
1718 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1719 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1720 if (cbMaxRead != 0)
1721 { /* likely */ }
1722 else
1723 {
1724 /* Overflowed because address is 0 and limit is max. */
1725 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1726 cbMaxRead = X86_PAGE_SIZE;
1727 }
1728 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1729 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1730 if (cbMaxRead2 < cbMaxRead)
1731 cbMaxRead = cbMaxRead2;
1732 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1733 }
1734
1735 /*
1736 * Get the TLB entry for this piece of code.
1737 */
1738 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1739 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1740 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1741 if (pTlbe->uTag == uTag)
1742 {
1743 /* likely when executing lots of code, otherwise unlikely */
1744# ifdef VBOX_WITH_STATISTICS
1745 pVCpu->iem.s.CodeTlb.cTlbHits++;
1746# endif
1747 }
1748 else
1749 {
1750 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1751# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1752 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1753 {
1754 pTlbe->uTag = uTag;
1755 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1756 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1757 pTlbe->GCPhys = NIL_RTGCPHYS;
1758 pTlbe->pbMappingR3 = NULL;
1759 }
1760 else
1761# endif
1762 {
1763 RTGCPHYS GCPhys;
1764 uint64_t fFlags;
1765 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1766 if (RT_FAILURE(rc))
1767 {
1768 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1769 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1770 }
1771
1772 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1773 pTlbe->uTag = uTag;
1774 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1775 pTlbe->GCPhys = GCPhys;
1776 pTlbe->pbMappingR3 = NULL;
1777 }
1778 }
1779
1780 /*
1781 * Check TLB page table level access flags.
1782 */
1783 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1784 {
1785 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1786 {
1787 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1788 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1789 }
1790 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1791 {
1792 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1793 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1794 }
1795 }
1796
1797# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1798 /*
1799 * Allow interpretation of patch manager code blocks since they can for
1800 * instance throw #PFs for perfectly good reasons.
1801 */
1802 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1803 { /* likely */ }
1804 else
1805 {
1806 /** @todo Could optimize this a little in ring-3 if we liked. */
1807 size_t cbRead = 0;
1808 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1809 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1810 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1811 return;
1812 }
1813# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1814
1815 /*
1816 * Look up the physical page info if necessary.
1817 */
1818 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1819 { /* not necessary */ }
1820 else
1821 {
1822 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1823 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1824 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1825 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1826 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1827 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1828 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1829 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1830 }
1831
1832# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1833 /*
1834 * Try do a direct read using the pbMappingR3 pointer.
1835 */
1836 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1837 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1838 {
1839 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1840 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1841 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1842 {
1843 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1844 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1845 }
1846 else
1847 {
1848 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1849 Assert(cbInstr < cbMaxRead);
1850 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1851 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1852 }
1853 if (cbDst <= cbMaxRead)
1854 {
1855 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1856 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1857 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1858 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1859 return;
1860 }
1861 pVCpu->iem.s.pbInstrBuf = NULL;
1862
1863 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1864 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1865 }
1866 else
1867# endif
1868#if 0
1869 /*
1870 * If there is no special read handling, we can read a bit more and
1871 * put it in the prefetch buffer.
1872 */
1873 if ( cbDst < cbMaxRead
1874 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1875 {
1876 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1877 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1878 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1879 { /* likely */ }
1880 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1881 {
1882 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1883 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1884 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1885 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1886 }
1887 else
1888 {
1889 Log((RT_SUCCESS(rcStrict)
1890 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1891 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1892 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1893 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1894 }
1895 }
1896 /*
1897 * Special read handling, so only read exactly what's needed.
1898 * This is a highly unlikely scenario.
1899 */
1900 else
1901#endif
1902 {
1903 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1904 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1905 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1906 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1907 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1908 { /* likely */ }
1909 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1910 {
1911 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1912 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1913 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1914 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1915 }
1916 else
1917 {
1918 Log((RT_SUCCESS(rcStrict)
1919 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1920 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1921 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1922 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1923 }
1924 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1925 if (cbToRead == cbDst)
1926 return;
1927 }
1928
1929 /*
1930 * More to read, loop.
1931 */
1932 cbDst -= cbMaxRead;
1933 pvDst = (uint8_t *)pvDst + cbMaxRead;
1934 }
1935#else
1936 RT_NOREF(pvDst, cbDst);
1937 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1938#endif
1939}
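/* Note! Worked example of the fetch loop above (illustration only): a 7 byte
 *       fetch with only 3 bytes left before the page boundary gets cbMaxRead = 3,
 *       copies those 3 bytes, then loops with cbDst = 4 and a GCPtrFirst that now
 *       points at the start of the following page, where the remaining bytes are
 *       read via a second TLB entry (or the appropriate exception is raised). */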
1940
1941#else /* !IEM_WITH_CODE_TLB */
1942
1943/**
1944 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1945 * exception if it fails.
1946 *
1947 * @returns Strict VBox status code.
1948 * @param pVCpu The cross context virtual CPU structure of the
1949 * calling thread.
1950 * @param cbMin The minimum number of bytes relative to offOpcode
1951 * that must be read.
1952 */
1953IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1954{
1955 /*
1956 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1957 *
1958 * First translate CS:rIP to a physical address.
1959 */
1960 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1961 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1962 uint32_t cbToTryRead;
1963 RTGCPTR GCPtrNext;
1964 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1965 {
1966 cbToTryRead = PAGE_SIZE;
1967 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1968 if (!IEM_IS_CANONICAL(GCPtrNext))
1969 return iemRaiseGeneralProtectionFault0(pVCpu);
1970 }
1971 else
1972 {
1973 uint32_t GCPtrNext32 = pCtx->eip;
1974 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1975 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1976 if (GCPtrNext32 > pCtx->cs.u32Limit)
1977 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1978 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1979 if (!cbToTryRead) /* overflowed */
1980 {
1981 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1982 cbToTryRead = UINT32_MAX;
1983 /** @todo check out wrapping around the code segment. */
1984 }
1985 if (cbToTryRead < cbMin - cbLeft)
1986 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1987 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1988 }
1989
1990 /* Only read up to the end of the page, and make sure we don't read more
1991 than the opcode buffer can hold. */
1992 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1993 if (cbToTryRead > cbLeftOnPage)
1994 cbToTryRead = cbLeftOnPage;
1995 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1996 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1997/** @todo r=bird: Convert assertion into undefined opcode exception? */
1998 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1999
2000# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2001 /* Allow interpretation of patch manager code blocks since they can for
2002 instance throw #PFs for perfectly good reasons. */
2003 if (pVCpu->iem.s.fInPatchCode)
2004 {
2005 size_t cbRead = 0;
2006 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2007 AssertRCReturn(rc, rc);
2008 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2009 return VINF_SUCCESS;
2010 }
2011# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2012
2013 RTGCPHYS GCPhys;
2014 uint64_t fFlags;
2015 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2016 if (RT_FAILURE(rc))
2017 {
2018 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2019 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2020 }
2021 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2022 {
2023 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2024 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2025 }
2026 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2027 {
2028 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2029 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2030 }
2031 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2032 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2033 /** @todo Check reserved bits and such stuff. PGM is better at doing
2034 * that, so do it when implementing the guest virtual address
2035 * TLB... */
2036
2037 /*
2038 * Read the bytes at this address.
2039 *
2040 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2041 * and since PATM should only patch the start of an instruction there
2042 * should be no need to check again here.
2043 */
2044 if (!pVCpu->iem.s.fBypassHandlers)
2045 {
2046 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2047 cbToTryRead, PGMACCESSORIGIN_IEM);
2048 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2049 { /* likely */ }
2050 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2051 {
2052 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2053 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2054 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2055 }
2056 else
2057 {
2058 Log((RT_SUCCESS(rcStrict)
2059 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2060 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2061 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2062 return rcStrict;
2063 }
2064 }
2065 else
2066 {
2067 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2068 if (RT_SUCCESS(rc))
2069 { /* likely */ }
2070 else
2071 {
2072 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2073 return rc;
2074 }
2075 }
2076 pVCpu->iem.s.cbOpcode += cbToTryRead;
2077 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2078
2079 return VINF_SUCCESS;
2080}
2081
2082#endif /* !IEM_WITH_CODE_TLB */
2083#ifndef IEM_WITH_SETJMP
2084
2085/**
2086 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2087 *
2088 * @returns Strict VBox status code.
2089 * @param pVCpu The cross context virtual CPU structure of the
2090 * calling thread.
2091 * @param pb Where to return the opcode byte.
2092 */
2093DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2094{
2095 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2096 if (rcStrict == VINF_SUCCESS)
2097 {
2098 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2099 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2100 pVCpu->iem.s.offOpcode = offOpcode + 1;
2101 }
2102 else
2103 *pb = 0;
2104 return rcStrict;
2105}
2106
2107
2108/**
2109 * Fetches the next opcode byte.
2110 *
2111 * @returns Strict VBox status code.
2112 * @param pVCpu The cross context virtual CPU structure of the
2113 * calling thread.
2114 * @param pu8 Where to return the opcode byte.
2115 */
2116DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2117{
2118 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2119 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2120 {
2121 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2122 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2123 return VINF_SUCCESS;
2124 }
2125 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2126}
2127
2128#else /* IEM_WITH_SETJMP */
2129
2130/**
2131 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2132 *
2133 * @returns The opcode byte.
2134 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2135 */
2136DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2137{
2138# ifdef IEM_WITH_CODE_TLB
2139 uint8_t u8;
2140 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2141 return u8;
2142# else
2143 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2144 if (rcStrict == VINF_SUCCESS)
2145 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2146 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2147# endif
2148}
2149
2150
2151/**
2152 * Fetches the next opcode byte, longjmp on error.
2153 *
2154 * @returns The opcode byte.
2155 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2156 */
2157DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2158{
2159# ifdef IEM_WITH_CODE_TLB
2160 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2161 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2162 if (RT_LIKELY( pbBuf != NULL
2163 && offBuf < pVCpu->iem.s.cbInstrBuf))
2164 {
2165 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2166 return pbBuf[offBuf];
2167 }
2168# else
2169 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2170 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2171 {
2172 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2173 return pVCpu->iem.s.abOpcode[offOpcode];
2174 }
2175# endif
2176 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2177}
2178
2179#endif /* IEM_WITH_SETJMP */
2180
2181/**
2182 * Fetches the next opcode byte, returns automatically on failure.
2183 *
2184 * @param a_pu8 Where to return the opcode byte.
2185 * @remark Implicitly references pVCpu.
2186 */
2187#ifndef IEM_WITH_SETJMP
2188# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2189 do \
2190 { \
2191 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2192 if (rcStrict2 == VINF_SUCCESS) \
2193 { /* likely */ } \
2194 else \
2195 return rcStrict2; \
2196 } while (0)
2197#else
2198# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2199#endif /* IEM_WITH_SETJMP */
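/* Note! Typical use of the macro above in a decoder function (illustration only):
 *           uint8_t bRm;
 *           IEM_OPCODE_GET_NEXT_U8(&bRm);
 *       In the non-setjmp build the macro returns the failure status from the
 *       calling function; in the setjmp build it is a plain assignment and any
 *       fetch error longjmps out of the helpers instead. */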
2200
2201
2202#ifndef IEM_WITH_SETJMP
2203/**
2204 * Fetches the next signed byte from the opcode stream.
2205 *
2206 * @returns Strict VBox status code.
2207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2208 * @param pi8 Where to return the signed byte.
2209 */
2210DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2211{
2212 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2213}
2214#endif /* !IEM_WITH_SETJMP */
2215
2216
2217/**
2218 * Fetches the next signed byte from the opcode stream, returning automatically
2219 * on failure.
2220 *
2221 * @param a_pi8 Where to return the signed byte.
2222 * @remark Implicitly references pVCpu.
2223 */
2224#ifndef IEM_WITH_SETJMP
2225# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2226 do \
2227 { \
2228 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2229 if (rcStrict2 != VINF_SUCCESS) \
2230 return rcStrict2; \
2231 } while (0)
2232#else /* IEM_WITH_SETJMP */
2233# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2234
2235#endif /* IEM_WITH_SETJMP */
2236
2237#ifndef IEM_WITH_SETJMP
2238
2239/**
2240 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2241 *
2242 * @returns Strict VBox status code.
2243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2244 * @param pu16 Where to return the opcode word.
2245 */
2246DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2247{
2248 uint8_t u8;
2249 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2250 if (rcStrict == VINF_SUCCESS)
2251 *pu16 = (int8_t)u8;
2252 return rcStrict;
2253}
2254
2255
2256/**
2257 * Fetches the next signed byte from the opcode stream, extending it to
2258 * unsigned 16-bit.
2259 *
2260 * @returns Strict VBox status code.
2261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2262 * @param pu16 Where to return the unsigned word.
2263 */
2264DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2265{
2266 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2267 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2268 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2269
2270 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2271 pVCpu->iem.s.offOpcode = offOpcode + 1;
2272 return VINF_SUCCESS;
2273}
2274
2275#endif /* !IEM_WITH_SETJMP */
2276
2277/**
2278 * Fetches the next signed byte from the opcode stream, sign-extending it to
2279 * a word, and returns automatically on failure.
2280 *
2281 * @param a_pu16 Where to return the word.
2282 * @remark Implicitly references pVCpu.
2283 */
2284#ifndef IEM_WITH_SETJMP
2285# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2286 do \
2287 { \
2288 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2289 if (rcStrict2 != VINF_SUCCESS) \
2290 return rcStrict2; \
2291 } while (0)
2292#else
2293# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2294#endif
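/* Note! Sign extension example for the S8 -> U16 fetch above (illustration only):
 *       an opcode byte of 0xfe (-2) yields the word 0xfffe, while 0x7f (+127)
 *       yields 0x007f; the (int8_t) cast performs the sign extension and the
 *       assignment to the wider unsigned type preserves the bit pattern. */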
2295
2296#ifndef IEM_WITH_SETJMP
2297
2298/**
2299 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2300 *
2301 * @returns Strict VBox status code.
2302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2303 * @param pu32 Where to return the opcode dword.
2304 */
2305DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2306{
2307 uint8_t u8;
2308 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2309 if (rcStrict == VINF_SUCCESS)
2310 *pu32 = (int8_t)u8;
2311 return rcStrict;
2312}
2313
2314
2315/**
2316 * Fetches the next signed byte from the opcode stream, extending it to
2317 * unsigned 32-bit.
2318 *
2319 * @returns Strict VBox status code.
2320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2321 * @param pu32 Where to return the unsigned dword.
2322 */
2323DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2324{
2325 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2326 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2327 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2328
2329 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2330 pVCpu->iem.s.offOpcode = offOpcode + 1;
2331 return VINF_SUCCESS;
2332}
2333
2334#endif /* !IEM_WITH_SETJMP */
2335
2336/**
2337 * Fetches the next signed byte from the opcode stream, sign-extending it to
2338 * a double word, and returns automatically on failure.
2339 *
2340 * @param a_pu32 Where to return the double word.
2341 * @remark Implicitly references pVCpu.
2342 */
2343#ifndef IEM_WITH_SETJMP
2344# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2345 do \
2346 { \
2347 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2348 if (rcStrict2 != VINF_SUCCESS) \
2349 return rcStrict2; \
2350 } while (0)
2351#else
2352# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2353#endif
2354
2355#ifndef IEM_WITH_SETJMP
2356
2357/**
2358 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2359 *
2360 * @returns Strict VBox status code.
2361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2362 * @param pu64 Where to return the opcode qword.
2363 */
2364DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2365{
2366 uint8_t u8;
2367 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2368 if (rcStrict == VINF_SUCCESS)
2369 *pu64 = (int8_t)u8;
2370 return rcStrict;
2371}
2372
2373
2374/**
2375 * Fetches the next signed byte from the opcode stream, extending it to
2376 * unsigned 64-bit.
2377 *
2378 * @returns Strict VBox status code.
2379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2380 * @param pu64 Where to return the unsigned qword.
2381 */
2382DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2383{
2384 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2385 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2386 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2387
2388 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2389 pVCpu->iem.s.offOpcode = offOpcode + 1;
2390 return VINF_SUCCESS;
2391}
2392
2393#endif /* !IEM_WITH_SETJMP */
2394
2395
2396/**
2397 * Fetches the next signed byte from the opcode stream, sign-extending it to
2398 * a quad word, and returns automatically on failure.
2399 *
2400 * @param a_pu64 Where to return the quad word.
2401 * @remark Implicitly references pVCpu.
2402 */
2403#ifndef IEM_WITH_SETJMP
2404# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2405 do \
2406 { \
2407 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2408 if (rcStrict2 != VINF_SUCCESS) \
2409 return rcStrict2; \
2410 } while (0)
2411#else
2412# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2413#endif
2414
2415
2416#ifndef IEM_WITH_SETJMP
2417
2418/**
2419 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2420 *
2421 * @returns Strict VBox status code.
2422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2423 * @param pu16 Where to return the opcode word.
2424 */
2425DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2426{
2427 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2428 if (rcStrict == VINF_SUCCESS)
2429 {
2430 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2431# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2432 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2433# else
2434 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2435# endif
2436 pVCpu->iem.s.offOpcode = offOpcode + 2;
2437 }
2438 else
2439 *pu16 = 0;
2440 return rcStrict;
2441}
2442
2443
2444/**
2445 * Fetches the next opcode word.
2446 *
2447 * @returns Strict VBox status code.
2448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2449 * @param pu16 Where to return the opcode word.
2450 */
2451DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2452{
2453 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2454 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2455 {
2456 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2457# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2458 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2459# else
2460 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2461# endif
2462 return VINF_SUCCESS;
2463 }
2464 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2465}
2466
2467#else /* IEM_WITH_SETJMP */
2468
2469/**
2470 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2471 *
2472 * @returns The opcode word.
2473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2474 */
2475DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2476{
2477# ifdef IEM_WITH_CODE_TLB
2478 uint16_t u16;
2479 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2480 return u16;
2481# else
2482 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2483 if (rcStrict == VINF_SUCCESS)
2484 {
2485 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2486 pVCpu->iem.s.offOpcode += 2;
2487# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2488 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2489# else
2490 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2491# endif
2492 }
2493 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2494# endif
2495}
2496
2497
2498/**
2499 * Fetches the next opcode word, longjmp on error.
2500 *
2501 * @returns The opcode word.
2502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2503 */
2504DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2505{
2506# ifdef IEM_WITH_CODE_TLB
2507 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2508 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2509 if (RT_LIKELY( pbBuf != NULL
2510 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2511 {
2512 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2513# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2514 return *(uint16_t const *)&pbBuf[offBuf];
2515# else
2516 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2517# endif
2518 }
2519# else
2520 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2521 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2522 {
2523 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2524# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2525 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2526# else
2527 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2528# endif
2529 }
2530# endif
2531 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2532}
2533
2534#endif /* IEM_WITH_SETJMP */
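/* Note! Byte order example for the word fetches above (illustration only): with
 *       abOpcode[offOpcode] = 0x34 and abOpcode[offOpcode + 1] = 0x12,
 *       RT_MAKE_U16(0x34, 0x12) returns 0x1234, matching the guest's little-endian
 *       immediate/displacement encoding; the IEM_USE_UNALIGNED_DATA_ACCESS path
 *       reads the same value directly on little-endian hosts. */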
2535
2536
2537/**
2538 * Fetches the next opcode word, returns automatically on failure.
2539 *
2540 * @param a_pu16 Where to return the opcode word.
2541 * @remark Implicitly references pVCpu.
2542 */
2543#ifndef IEM_WITH_SETJMP
2544# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2545 do \
2546 { \
2547 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2548 if (rcStrict2 != VINF_SUCCESS) \
2549 return rcStrict2; \
2550 } while (0)
2551#else
2552# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2553#endif
2554
2555#ifndef IEM_WITH_SETJMP
2556
2557/**
2558 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2559 *
2560 * @returns Strict VBox status code.
2561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2562 * @param pu32 Where to return the opcode double word.
2563 */
2564DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2565{
2566 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2567 if (rcStrict == VINF_SUCCESS)
2568 {
2569 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2570 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2571 pVCpu->iem.s.offOpcode = offOpcode + 2;
2572 }
2573 else
2574 *pu32 = 0;
2575 return rcStrict;
2576}
2577
2578
2579/**
2580 * Fetches the next opcode word, zero extending it to a double word.
2581 *
2582 * @returns Strict VBox status code.
2583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2584 * @param pu32 Where to return the opcode double word.
2585 */
2586DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2587{
2588 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2589 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2590 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2591
2592 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2593 pVCpu->iem.s.offOpcode = offOpcode + 2;
2594 return VINF_SUCCESS;
2595}
2596
2597#endif /* !IEM_WITH_SETJMP */
2598
2599
2600/**
2601 * Fetches the next opcode word and zero extends it to a double word, returns
2602 * automatically on failure.
2603 *
2604 * @param a_pu32 Where to return the opcode double word.
2605 * @remark Implicitly references pVCpu.
2606 */
2607#ifndef IEM_WITH_SETJMP
2608# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2609 do \
2610 { \
2611 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2612 if (rcStrict2 != VINF_SUCCESS) \
2613 return rcStrict2; \
2614 } while (0)
2615#else
2616# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2617#endif
2618
2619#ifndef IEM_WITH_SETJMP
2620
2621/**
2622 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2623 *
2624 * @returns Strict VBox status code.
2625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2626 * @param pu64 Where to return the opcode quad word.
2627 */
2628DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2629{
2630 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2631 if (rcStrict == VINF_SUCCESS)
2632 {
2633 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2634 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2635 pVCpu->iem.s.offOpcode = offOpcode + 2;
2636 }
2637 else
2638 *pu64 = 0;
2639 return rcStrict;
2640}
2641
2642
2643/**
2644 * Fetches the next opcode word, zero extending it to a quad word.
2645 *
2646 * @returns Strict VBox status code.
2647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2648 * @param pu64 Where to return the opcode quad word.
2649 */
2650DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2651{
2652 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2653 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2654 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2655
2656 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2657 pVCpu->iem.s.offOpcode = offOpcode + 2;
2658 return VINF_SUCCESS;
2659}
2660
2661#endif /* !IEM_WITH_SETJMP */
2662
2663/**
2664 * Fetches the next opcode word and zero extends it to a quad word, returns
2665 * automatically on failure.
2666 *
2667 * @param a_pu64 Where to return the opcode quad word.
2668 * @remark Implicitly references pVCpu.
2669 */
2670#ifndef IEM_WITH_SETJMP
2671# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2672 do \
2673 { \
2674 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2675 if (rcStrict2 != VINF_SUCCESS) \
2676 return rcStrict2; \
2677 } while (0)
2678#else
2679# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2680#endif
2681
2682
2683#ifndef IEM_WITH_SETJMP
2684/**
2685 * Fetches the next signed word from the opcode stream.
2686 *
2687 * @returns Strict VBox status code.
2688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2689 * @param pi16 Where to return the signed word.
2690 */
2691DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2692{
2693 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2694}
2695#endif /* !IEM_WITH_SETJMP */
2696
2697
2698/**
2699 * Fetches the next signed word from the opcode stream, returning automatically
2700 * on failure.
2701 *
2702 * @param a_pi16 Where to return the signed word.
2703 * @remark Implicitly references pVCpu.
2704 */
2705#ifndef IEM_WITH_SETJMP
2706# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2707 do \
2708 { \
2709 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2710 if (rcStrict2 != VINF_SUCCESS) \
2711 return rcStrict2; \
2712 } while (0)
2713#else
2714# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2715#endif
2716
2717#ifndef IEM_WITH_SETJMP
2718
2719/**
2720 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2721 *
2722 * @returns Strict VBox status code.
2723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2724 * @param pu32 Where to return the opcode dword.
2725 */
2726DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2727{
2728 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2729 if (rcStrict == VINF_SUCCESS)
2730 {
2731 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2732# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2733 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2734# else
2735 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2736 pVCpu->iem.s.abOpcode[offOpcode + 1],
2737 pVCpu->iem.s.abOpcode[offOpcode + 2],
2738 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2739# endif
2740 pVCpu->iem.s.offOpcode = offOpcode + 4;
2741 }
2742 else
2743 *pu32 = 0;
2744 return rcStrict;
2745}
2746
2747
2748/**
2749 * Fetches the next opcode dword.
2750 *
2751 * @returns Strict VBox status code.
2752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2753 * @param pu32 Where to return the opcode double word.
2754 */
2755DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2756{
2757 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2758 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2759 {
2760 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2761# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2762 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2763# else
2764 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2765 pVCpu->iem.s.abOpcode[offOpcode + 1],
2766 pVCpu->iem.s.abOpcode[offOpcode + 2],
2767 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2768# endif
2769 return VINF_SUCCESS;
2770 }
2771 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2772}
2773
2774#else /* IEM_WITH_SETJMP */
2775
2776/**
2777 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2778 *
2779 * @returns The opcode dword.
2780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2781 */
2782DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2783{
2784# ifdef IEM_WITH_CODE_TLB
2785 uint32_t u32;
2786 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2787 return u32;
2788# else
2789 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2790 if (rcStrict == VINF_SUCCESS)
2791 {
2792 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2793 pVCpu->iem.s.offOpcode = offOpcode + 4;
2794# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2795 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2796# else
2797 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2798 pVCpu->iem.s.abOpcode[offOpcode + 1],
2799 pVCpu->iem.s.abOpcode[offOpcode + 2],
2800 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2801# endif
2802 }
2803 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2804# endif
2805}
2806
2807
2808/**
2809 * Fetches the next opcode dword, longjmp on error.
2810 *
2811 * @returns The opcode dword.
2812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2813 */
2814DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2815{
2816# ifdef IEM_WITH_CODE_TLB
2817 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2818 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2819 if (RT_LIKELY( pbBuf != NULL
2820 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2821 {
2822 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2823# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2824 return *(uint32_t const *)&pbBuf[offBuf];
2825# else
2826 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2827 pbBuf[offBuf + 1],
2828 pbBuf[offBuf + 2],
2829 pbBuf[offBuf + 3]);
2830# endif
2831 }
2832# else
2833 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2834 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2835 {
2836 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2837# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2838 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2839# else
2840 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2841 pVCpu->iem.s.abOpcode[offOpcode + 1],
2842 pVCpu->iem.s.abOpcode[offOpcode + 2],
2843 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2844# endif
2845 }
2846# endif
2847 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2848}
2849
2850#endif /* IEM_WITH_SETJMP */
2851
2852
2853/**
2854 * Fetches the next opcode dword, returns automatically on failure.
2855 *
2856 * @param a_pu32 Where to return the opcode dword.
2857 * @remark Implicitly references pVCpu.
2858 */
2859#ifndef IEM_WITH_SETJMP
2860# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2861 do \
2862 { \
2863 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2864 if (rcStrict2 != VINF_SUCCESS) \
2865 return rcStrict2; \
2866 } while (0)
2867#else
2868# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2869#endif
2870
2871#ifndef IEM_WITH_SETJMP
2872
2873/**
2874 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2875 *
2876 * @returns Strict VBox status code.
2877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2878 * @param pu64 Where to return the opcode dword, zero extended to a quad word.
2879 */
2880DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2881{
2882 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2883 if (rcStrict == VINF_SUCCESS)
2884 {
2885 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2886 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2887 pVCpu->iem.s.abOpcode[offOpcode + 1],
2888 pVCpu->iem.s.abOpcode[offOpcode + 2],
2889 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2890 pVCpu->iem.s.offOpcode = offOpcode + 4;
2891 }
2892 else
2893 *pu64 = 0;
2894 return rcStrict;
2895}
2896
2897
2898/**
2899 * Fetches the next opcode dword, zero extending it to a quad word.
2900 *
2901 * @returns Strict VBox status code.
2902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2903 * @param pu64 Where to return the opcode quad word.
2904 */
2905DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2906{
2907 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2908 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2909 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2910
2911 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2912 pVCpu->iem.s.abOpcode[offOpcode + 1],
2913 pVCpu->iem.s.abOpcode[offOpcode + 2],
2914 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2915 pVCpu->iem.s.offOpcode = offOpcode + 4;
2916 return VINF_SUCCESS;
2917}
2918
2919#endif /* !IEM_WITH_SETJMP */
2920
2921
2922/**
2923 * Fetches the next opcode dword and zero extends it to a quad word, returns
2924 * automatically on failure.
2925 *
2926 * @param a_pu64 Where to return the opcode quad word.
2927 * @remark Implicitly references pVCpu.
2928 */
2929#ifndef IEM_WITH_SETJMP
2930# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2931 do \
2932 { \
2933 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2934 if (rcStrict2 != VINF_SUCCESS) \
2935 return rcStrict2; \
2936 } while (0)
2937#else
2938# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2939#endif
2940
2941
2942#ifndef IEM_WITH_SETJMP
2943/**
2944 * Fetches the next signed double word from the opcode stream.
2945 *
2946 * @returns Strict VBox status code.
2947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2948 * @param pi32 Where to return the signed double word.
2949 */
2950DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2951{
2952 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2953}
2954#endif
2955
2956/**
2957 * Fetches the next signed double word from the opcode stream, returning
2958 * automatically on failure.
2959 *
2960 * @param a_pi32 Where to return the signed double word.
2961 * @remark Implicitly references pVCpu.
2962 */
2963#ifndef IEM_WITH_SETJMP
2964# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2965 do \
2966 { \
2967 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2968 if (rcStrict2 != VINF_SUCCESS) \
2969 return rcStrict2; \
2970 } while (0)
2971#else
2972# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2973#endif
2974
2975#ifndef IEM_WITH_SETJMP
2976
2977/**
2978 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2979 *
2980 * @returns Strict VBox status code.
2981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2982 * @param pu64 Where to return the opcode qword.
2983 */
2984DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2985{
2986 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2987 if (rcStrict == VINF_SUCCESS)
2988 {
2989 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2990 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2991 pVCpu->iem.s.abOpcode[offOpcode + 1],
2992 pVCpu->iem.s.abOpcode[offOpcode + 2],
2993 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2994 pVCpu->iem.s.offOpcode = offOpcode + 4;
2995 }
2996 else
2997 *pu64 = 0;
2998 return rcStrict;
2999}
3000
3001
3002/**
3003 * Fetches the next opcode dword, sign extending it into a quad word.
3004 *
3005 * @returns Strict VBox status code.
3006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3007 * @param pu64 Where to return the opcode quad word.
3008 */
3009DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3010{
3011 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3012 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3013 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3014
3015 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3016 pVCpu->iem.s.abOpcode[offOpcode + 1],
3017 pVCpu->iem.s.abOpcode[offOpcode + 2],
3018 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3019 *pu64 = i32;
3020 pVCpu->iem.s.offOpcode = offOpcode + 4;
3021 return VINF_SUCCESS;
3022}
3023
3024#endif /* !IEM_WITH_SETJMP */
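/* Note! Sign extension example for the S32 -> U64 fetch above (illustration only):
 *       an opcode dword of 0x80000000 yields *pu64 = 0xffffffff80000000, whereas
 *       0x7fffffff yields 0x000000007fffffff; this mirrors how the CPU widens
 *       32-bit displacements and immediates in 64-bit mode. */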
3025
3026
3027/**
3028 * Fetches the next opcode double word and sign extends it to a quad word,
3029 * returns automatically on failure.
3030 *
3031 * @param a_pu64 Where to return the opcode quad word.
3032 * @remark Implicitly references pVCpu.
3033 */
3034#ifndef IEM_WITH_SETJMP
3035# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3036 do \
3037 { \
3038 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3039 if (rcStrict2 != VINF_SUCCESS) \
3040 return rcStrict2; \
3041 } while (0)
3042#else
3043# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3044#endif
3045
3046#ifndef IEM_WITH_SETJMP
3047
3048/**
3049 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3050 *
3051 * @returns Strict VBox status code.
3052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3053 * @param pu64 Where to return the opcode qword.
3054 */
3055DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3056{
3057 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3058 if (rcStrict == VINF_SUCCESS)
3059 {
3060 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3061# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3062 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3063# else
3064 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3065 pVCpu->iem.s.abOpcode[offOpcode + 1],
3066 pVCpu->iem.s.abOpcode[offOpcode + 2],
3067 pVCpu->iem.s.abOpcode[offOpcode + 3],
3068 pVCpu->iem.s.abOpcode[offOpcode + 4],
3069 pVCpu->iem.s.abOpcode[offOpcode + 5],
3070 pVCpu->iem.s.abOpcode[offOpcode + 6],
3071 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3072# endif
3073 pVCpu->iem.s.offOpcode = offOpcode + 8;
3074 }
3075 else
3076 *pu64 = 0;
3077 return rcStrict;
3078}
3079
3080
3081/**
3082 * Fetches the next opcode qword.
3083 *
3084 * @returns Strict VBox status code.
3085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3086 * @param pu64 Where to return the opcode qword.
3087 */
3088DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3089{
3090 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3091 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3092 {
3093# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3094 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3095# else
3096 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3097 pVCpu->iem.s.abOpcode[offOpcode + 1],
3098 pVCpu->iem.s.abOpcode[offOpcode + 2],
3099 pVCpu->iem.s.abOpcode[offOpcode + 3],
3100 pVCpu->iem.s.abOpcode[offOpcode + 4],
3101 pVCpu->iem.s.abOpcode[offOpcode + 5],
3102 pVCpu->iem.s.abOpcode[offOpcode + 6],
3103 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3104# endif
3105 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3106 return VINF_SUCCESS;
3107 }
3108 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3109}
3110
3111#else /* IEM_WITH_SETJMP */
3112
3113/**
3114 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3115 *
3116 * @returns The opcode qword.
3117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3118 */
3119DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3120{
3121# ifdef IEM_WITH_CODE_TLB
3122 uint64_t u64;
3123 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3124 return u64;
3125# else
3126 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3127 if (rcStrict == VINF_SUCCESS)
3128 {
3129 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3130 pVCpu->iem.s.offOpcode = offOpcode + 8;
3131# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3132 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3133# else
3134 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3135 pVCpu->iem.s.abOpcode[offOpcode + 1],
3136 pVCpu->iem.s.abOpcode[offOpcode + 2],
3137 pVCpu->iem.s.abOpcode[offOpcode + 3],
3138 pVCpu->iem.s.abOpcode[offOpcode + 4],
3139 pVCpu->iem.s.abOpcode[offOpcode + 5],
3140 pVCpu->iem.s.abOpcode[offOpcode + 6],
3141 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3142# endif
3143 }
3144 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3145# endif
3146}
3147
3148
3149/**
3150 * Fetches the next opcode qword, longjmp on error.
3151 *
3152 * @returns The opcode qword.
3153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3154 */
3155DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3156{
3157# ifdef IEM_WITH_CODE_TLB
3158 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3159 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3160 if (RT_LIKELY( pbBuf != NULL
3161 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3162 {
3163 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3164# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3165 return *(uint64_t const *)&pbBuf[offBuf];
3166# else
3167 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3168 pbBuf[offBuf + 1],
3169 pbBuf[offBuf + 2],
3170 pbBuf[offBuf + 3],
3171 pbBuf[offBuf + 4],
3172 pbBuf[offBuf + 5],
3173 pbBuf[offBuf + 6],
3174 pbBuf[offBuf + 7]);
3175# endif
3176 }
3177# else
3178 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3179 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3180 {
3181 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3182# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3183 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3184# else
3185 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3186 pVCpu->iem.s.abOpcode[offOpcode + 1],
3187 pVCpu->iem.s.abOpcode[offOpcode + 2],
3188 pVCpu->iem.s.abOpcode[offOpcode + 3],
3189 pVCpu->iem.s.abOpcode[offOpcode + 4],
3190 pVCpu->iem.s.abOpcode[offOpcode + 5],
3191 pVCpu->iem.s.abOpcode[offOpcode + 6],
3192 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3193# endif
3194 }
3195# endif
3196 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3197}
3198
3199#endif /* IEM_WITH_SETJMP */
3200
3201/**
3202 * Fetches the next opcode quad word, returns automatically on failure.
3203 *
3204 * @param a_pu64 Where to return the opcode quad word.
3205 * @remark Implicitly references pVCpu.
3206 */
3207#ifndef IEM_WITH_SETJMP
3208# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3209 do \
3210 { \
3211 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3212 if (rcStrict2 != VINF_SUCCESS) \
3213 return rcStrict2; \
3214 } while (0)
3215#else
3216# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3217#endif
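
/* Illustrative sketch (not part of the build): a hypothetical handler for an
 * instruction taking a 64-bit immediate operand would fetch it like this,
 * again assuming pVCpu is in scope and the function returns VBOXSTRICTRC:
 *
 *      uint64_t u64Imm;
 *      IEM_OPCODE_GET_NEXT_U64(&u64Imm);
 *      // Status-code builds return on failure; setjmp builds longjmp via
 *      // iemOpcodeGetNextU64Jmp instead.
 */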
3218
3219
3220/** @name Misc Worker Functions.
3221 * @{
3222 */
3223
3224/**
3225 * Gets the exception class for the specified exception vector.
3226 *
3227 * @returns The class of the specified exception.
3228 * @param uVector The exception vector.
3229 */
3230IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3231{
3232 Assert(uVector <= X86_XCPT_LAST);
3233 switch (uVector)
3234 {
3235 case X86_XCPT_DE:
3236 case X86_XCPT_TS:
3237 case X86_XCPT_NP:
3238 case X86_XCPT_SS:
3239 case X86_XCPT_GP:
3240 case X86_XCPT_SX: /* AMD only */
3241 return IEMXCPTCLASS_CONTRIBUTORY;
3242
3243 case X86_XCPT_PF:
3244 case X86_XCPT_VE: /* Intel only */
3245 return IEMXCPTCLASS_PAGE_FAULT;
3246 }
3247 return IEMXCPTCLASS_BENIGN;
3248}
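
/* Descriptive note: per the classification above, #DE, #TS, #NP, #SS, #GP and
 * AMD's #SX are contributory, #PF and Intel's #VE form the page-fault class,
 * and every other vector is treated as benign for the purpose of the
 * double/triple fault evaluation below. */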
3249
3250
3251/**
3252 * Evaluates how to handle an exception caused during delivery of another event
3253 * (exception / interrupt).
3254 *
3255 * @returns How to handle the recursive exception.
3256 * @param pVCpu The cross context virtual CPU structure of the
3257 * calling thread.
3258 * @param fPrevFlags The flags of the previous event.
3259 * @param uPrevVector The vector of the previous event.
3260 * @param fCurFlags The flags of the current exception.
3261 * @param uCurVector The vector of the current exception.
3262 * @param pfXcptRaiseInfo Where to store additional information about the
3263 * exception condition. Optional.
3264 */
3265VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3266 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3267{
3268 /*
3269     * Only CPU exceptions can be raised while delivering other events; software interrupt
3270     * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3271 */
3272 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3273 Assert(pVCpu); RT_NOREF(pVCpu);
3274
3275 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3276 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3277 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3278 {
3279 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3280 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3281 {
3282 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3283 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3284 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3285 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3286 {
3287 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3288 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3289 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3290 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3291 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3292 }
3293 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3294 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3295 {
3296 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3297 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%u uCurVector=%u -> #DF\n", uPrevVector, uCurVector));
3298 }
3299 else if ( uPrevVector == X86_XCPT_DF
3300 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3301 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3302 {
3303 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3304 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3305 }
3306 }
3307 else
3308 {
3309 if (uPrevVector == X86_XCPT_NMI)
3310 {
3311 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3312 if (uCurVector == X86_XCPT_PF)
3313 {
3314 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3315 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3316 }
3317 }
3318 else if ( uPrevVector == X86_XCPT_AC
3319 && uCurVector == X86_XCPT_AC)
3320 {
3321 enmRaise = IEMXCPTRAISE_CPU_HANG;
3322 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3323 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3324 }
3325 }
3326 }
3327 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3328 {
3329 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3330 if (uCurVector == X86_XCPT_PF)
3331 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3332 }
3333 else
3334 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3335
3336 if (pfXcptRaiseInfo)
3337 *pfXcptRaiseInfo = fRaiseInfo;
3338 return enmRaise;
3339}
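
/* Illustrative sketch (not part of the build): evaluating a #PF raised while
 * delivering a #GP, roughly as an event-delivery caller (with pVCpu in scope)
 * might do it:
 *
 *      IEMXCPTRAISEINFO fRaiseInfo;
 *      IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                       &fRaiseInfo);
 *      // Contributory (#GP) followed by a page fault yields
 *      // IEMXCPTRAISE_CURRENT_XCPT; the reverse order (#PF followed by #GP)
 *      // yields IEMXCPTRAISE_DOUBLE_FAULT.
 */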
3340
3341
3342/**
3343 * Enters the CPU shutdown state initiated by a triple fault or other
3344 * unrecoverable conditions.
3345 *
3346 * @returns Strict VBox status code.
3347 * @param pVCpu The cross context virtual CPU structure of the
3348 * calling thread.
3349 */
3350IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3351{
3352 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3353 {
3354 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3355 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3356 }
3357
3358 RT_NOREF(pVCpu);
3359 return VINF_EM_TRIPLE_FAULT;
3360}
3361
3362
3363#ifdef VBOX_WITH_NESTED_HWVIRT
3364IEM_STATIC VBOXSTRICTRC iemHandleSvmNstGstEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
3365 uint32_t uErr, uint64_t uCr2)
3366{
3367 Assert(IEM_IS_SVM_ENABLED(pVCpu));
3368
3369 /*
3370 * Handle nested-guest SVM exception and software interrupt intercepts,
3371 * see AMD spec. 15.12 "Exception Intercepts".
3372 *
3373 * - NMI intercepts have their own exit code and do not cause SVM_EXIT_EXCEPTION_2 #VMEXITs.
3374 * - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
3375 * even when they use a vector in the range 0 to 31.
3376 * - ICEBP should not trigger #DB intercept, but its own intercept.
3377     * - For #PF exceptions, the intercept is checked before CR2 is written by the exception.
3378 */
3379 /* Check NMI intercept */
3380 if ( u8Vector == X86_XCPT_NMI
3381 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3382 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
3383 {
3384 Log2(("iemHandleSvmNstGstEventIntercept: NMI intercept -> #VMEXIT\n"));
3385 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3386 }
3387
3388 /* Check ICEBP intercept. */
3389 if ( (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
3390 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_ICEBP))
3391 {
3392 Log2(("iemHandleSvmNstGstEventIntercept: ICEBP intercept -> #VMEXIT\n"));
3393 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_ICEBP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3394 }
3395
3396 /* Check CPU exception intercepts. */
3397 if ( (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3398 && IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, u8Vector))
3399 {
3400 Assert(u8Vector <= X86_XCPT_LAST);
3401 uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
3402 uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
3403 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist
3404 && u8Vector == X86_XCPT_PF
3405 && !(uErr & X86_TRAP_PF_ID))
3406 {
3407 /** @todo Nested-guest SVM - figure out fetching op-code bytes from IEM. */
3408#ifdef IEM_WITH_CODE_TLB
3409#else
3410 uint8_t const offOpCode = pVCpu->iem.s.offOpcode;
3411 uint8_t const cbCurrent = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode;
3412 if ( cbCurrent > 0
3413 && cbCurrent < sizeof(pCtx->hwvirt.svm.VmcbCtrl.abInstr))
3414 {
3415 Assert(cbCurrent <= sizeof(pVCpu->iem.s.abOpcode));
3416 memcpy(&pCtx->hwvirt.svm.VmcbCtrl.abInstr[0], &pVCpu->iem.s.abOpcode[offOpCode], cbCurrent);
3417 }
3418#endif
3419 }
3420 Log2(("iemHandleSvmNstGstEventIntercept: Xcpt intercept. u8Vector=%#x uExitInfo1=%#RX64, uExitInfo2=%#RX64 -> #VMEXIT\n",
3421 u8Vector, uExitInfo1, uExitInfo2));
3422 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + u8Vector, uExitInfo1, uExitInfo2);
3423 }
3424
3425 /* Check software interrupt (INTn) intercepts. */
3426 if ( (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3427 | IEM_XCPT_FLAGS_BP_INSTR
3428 | IEM_XCPT_FLAGS_ICEBP_INSTR
3429 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3430 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN))
3431 {
3432 uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? u8Vector : 0;
3433 Log2(("iemHandleSvmNstGstEventIntercept: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
3434 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
3435 }
3436
3437 return VINF_HM_INTERCEPT_NOT_ACTIVE;
3438}
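
/* Descriptive note: the intercept checks above are evaluated in a fixed order
 * (NMI, ICEBP, CPU exception vectors, then INTn); the first matching intercept
 * causes the #VMEXIT, so the remaining checks are never reached. */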
3439#endif
3440
3441/**
3442 * Validates a new SS segment.
3443 *
3444 * @returns VBox strict status code.
3445 * @param pVCpu The cross context virtual CPU structure of the
3446 * calling thread.
3447 * @param pCtx The CPU context.
3448 * @param   NewSS           The new SS selector.
3449 * @param uCpl The CPL to load the stack for.
3450 * @param pDesc Where to return the descriptor.
3451 */
3452IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3453{
3454 NOREF(pCtx);
3455
3456 /* Null selectors are not allowed (we're not called for dispatching
3457 interrupts with SS=0 in long mode). */
3458 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3459 {
3460 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3461 return iemRaiseTaskSwitchFault0(pVCpu);
3462 }
3463
3464 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3465 if ((NewSS & X86_SEL_RPL) != uCpl)
3466 {
3467 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3468 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3469 }
3470
3471 /*
3472 * Read the descriptor.
3473 */
3474 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3475 if (rcStrict != VINF_SUCCESS)
3476 return rcStrict;
3477
3478 /*
3479 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3480 */
3481 if (!pDesc->Legacy.Gen.u1DescType)
3482 {
3483 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3484 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3485 }
3486
3487 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3488 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3489 {
3490 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3491 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3492 }
3493 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3494 {
3495 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3496 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3497 }
3498
3499 /* Is it there? */
3500 /** @todo testcase: Is this checked before the canonical / limit check below? */
3501 if (!pDesc->Legacy.Gen.u1Present)
3502 {
3503 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3504 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3505 }
3506
3507 return VINF_SUCCESS;
3508}
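
/* Illustrative sketch (not part of the build): a privilege-change stack switch
 * would validate the SS value taken from the TSS roughly like this, assuming
 * pVCpu/pCtx are in scope and NewSS and uNewCpl were obtained earlier:
 *
 *      IEMSELDESC DescSS;
 *      VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;   // #TS/#NP already raised with the proper selector error code
 */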
3509
3510
3511/**
3512 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3513 * not.
3514 *
3515 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3516 * @param a_pCtx The CPU context.
3517 */
3518#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3519# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3520 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3521 ? (a_pCtx)->eflags.u \
3522 : CPUMRawGetEFlags(a_pVCpu) )
3523#else
3524# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3525 ( (a_pCtx)->eflags.u )
3526#endif
3527
3528/**
3529 * Updates the EFLAGS in the correct manner wrt. PATM.
3530 *
3531 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3532 * @param a_pCtx The CPU context.
3533 * @param a_fEfl The new EFLAGS.
3534 */
3535#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3536# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3537 do { \
3538 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3539 (a_pCtx)->eflags.u = (a_fEfl); \
3540 else \
3541 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3542 } while (0)
3543#else
3544# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3545 do { \
3546 (a_pCtx)->eflags.u = (a_fEfl); \
3547 } while (0)
3548#endif
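
/* Illustrative sketch (not part of the build): a read-modify-write of the
 * guest EFLAGS through these macros, e.g. clearing IF during event delivery
 * (with the usual pVCpu/pCtx in scope):
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;
 *      IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 *
 * In raw-mode builds this routes through CPUMRawGetEFlags/CPUMRawSetEFlags so
 * that PATM-managed flag bits are handled consistently. */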
3549
3550
3551/** @} */
3552
3553/** @name Raising Exceptions.
3554 *
3555 * @{
3556 */
3557
3558
3559/**
3560 * Loads the specified stack far pointer from the TSS.
3561 *
3562 * @returns VBox strict status code.
3563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3564 * @param pCtx The CPU context.
3565 * @param uCpl The CPL to load the stack for.
3566 * @param pSelSS Where to return the new stack segment.
3567 * @param puEsp Where to return the new stack pointer.
3568 */
3569IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3570 PRTSEL pSelSS, uint32_t *puEsp)
3571{
3572 VBOXSTRICTRC rcStrict;
3573 Assert(uCpl < 4);
3574
3575 switch (pCtx->tr.Attr.n.u4Type)
3576 {
3577 /*
3578 * 16-bit TSS (X86TSS16).
3579 */
3580 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); /* fall thru */
3581 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3582 {
3583 uint32_t off = uCpl * 4 + 2;
3584 if (off + 4 <= pCtx->tr.u32Limit)
3585 {
3586 /** @todo check actual access pattern here. */
3587 uint32_t u32Tmp = 0; /* gcc maybe... */
3588 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3589 if (rcStrict == VINF_SUCCESS)
3590 {
3591 *puEsp = RT_LOWORD(u32Tmp);
3592 *pSelSS = RT_HIWORD(u32Tmp);
3593 return VINF_SUCCESS;
3594 }
3595 }
3596 else
3597 {
3598 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3599 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3600 }
3601 break;
3602 }
3603
3604 /*
3605 * 32-bit TSS (X86TSS32).
3606 */
3607 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); /* fall thru */
3608 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3609 {
3610 uint32_t off = uCpl * 8 + 4;
3611 if (off + 7 <= pCtx->tr.u32Limit)
3612 {
3613            /** @todo check actual access pattern here. */
3614 uint64_t u64Tmp;
3615 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3616 if (rcStrict == VINF_SUCCESS)
3617 {
3618 *puEsp = u64Tmp & UINT32_MAX;
3619 *pSelSS = (RTSEL)(u64Tmp >> 32);
3620 return VINF_SUCCESS;
3621 }
3622 }
3623 else
3624 {
3625                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3626 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3627 }
3628 break;
3629 }
3630
3631 default:
3632 AssertFailed();
3633 rcStrict = VERR_IEM_IPE_4;
3634 break;
3635 }
3636
3637 *puEsp = 0; /* make gcc happy */
3638 *pSelSS = 0; /* make gcc happy */
3639 return rcStrict;
3640}
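
/* Descriptive note: the offsets used above follow the TSS layouts: the 16-bit
 * TSS stores {sp, ss} pairs at a 4-byte stride starting at offset 2
 * (uCpl * 4 + 2), while the 32-bit TSS stores {esp, ss} pairs at an 8-byte
 * stride starting at offset 4 (uCpl * 8 + 4). */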
3641
3642
3643/**
3644 * Loads the specified stack pointer from the 64-bit TSS.
3645 *
3646 * @returns VBox strict status code.
3647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3648 * @param pCtx The CPU context.
3649 * @param uCpl The CPL to load the stack for.
3650 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3651 * @param puRsp Where to return the new stack pointer.
3652 */
3653IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3654{
3655 Assert(uCpl < 4);
3656 Assert(uIst < 8);
3657 *puRsp = 0; /* make gcc happy */
3658
3659 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3660
3661 uint32_t off;
3662 if (uIst)
3663 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3664 else
3665 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3666 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3667 {
3668 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3669 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3670 }
3671
3672 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3673}
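
/* Descriptive note: for the 64-bit TSS, uIst == 0 selects rsp0/rsp1/rsp2 by
 * CPL (uCpl * 8 + the offset of rsp0), while uIst 1..7 selects the matching
 * IST slot ((uIst - 1) * 8 + the offset of ist1), mirroring the X86TSS64
 * layout. */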
3674
3675
3676/**
3677 * Adjust the CPU state according to the exception being raised.
3678 *
3679 * @param pCtx The CPU context.
3680 * @param u8Vector The exception that has been raised.
3681 */
3682DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3683{
3684 switch (u8Vector)
3685 {
3686 case X86_XCPT_DB:
3687 pCtx->dr[7] &= ~X86_DR7_GD;
3688 break;
3689 /** @todo Read the AMD and Intel exception reference... */
3690 }
3691}
3692
3693
3694/**
3695 * Implements exceptions and interrupts for real mode.
3696 *
3697 * @returns VBox strict status code.
3698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3699 * @param pCtx The CPU context.
3700 * @param cbInstr The number of bytes to offset rIP by in the return
3701 * address.
3702 * @param u8Vector The interrupt / exception vector number.
3703 * @param fFlags The flags.
3704 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3705 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3706 */
3707IEM_STATIC VBOXSTRICTRC
3708iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3709 PCPUMCTX pCtx,
3710 uint8_t cbInstr,
3711 uint8_t u8Vector,
3712 uint32_t fFlags,
3713 uint16_t uErr,
3714 uint64_t uCr2)
3715{
3716 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3717 NOREF(uErr); NOREF(uCr2);
3718
3719 /*
3720 * Read the IDT entry.
3721 */
3722 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3723 {
3724 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3725 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3726 }
3727 RTFAR16 Idte;
3728 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3729 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3730 return rcStrict;
3731
3732 /*
3733 * Push the stack frame.
3734 */
3735 uint16_t *pu16Frame;
3736 uint64_t uNewRsp;
3737 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3738 if (rcStrict != VINF_SUCCESS)
3739 return rcStrict;
3740
3741 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3742#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3743 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3744 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3745 fEfl |= UINT16_C(0xf000);
3746#endif
3747 pu16Frame[2] = (uint16_t)fEfl;
3748 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3749 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3750 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3751 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3752 return rcStrict;
3753
3754 /*
3755 * Load the vector address into cs:ip and make exception specific state
3756 * adjustments.
3757 */
3758 pCtx->cs.Sel = Idte.sel;
3759 pCtx->cs.ValidSel = Idte.sel;
3760 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3761 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3762 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3763 pCtx->rip = Idte.off;
3764 fEfl &= ~X86_EFL_IF;
3765 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3766
3767 /** @todo do we actually do this in real mode? */
3768 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3769 iemRaiseXcptAdjustState(pCtx, u8Vector);
3770
3771 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3772}
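
/* Descriptive note: the frame pushed above is the classic real-mode 3-word
 * layout: FLAGS at [sp+4], CS at [sp+2] and the return IP at [sp+0]. The
 * return IP is advanced past the instruction only for software interrupts
 * (IEM_XCPT_FLAGS_T_SOFT_INT). */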
3773
3774
3775/**
3776 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3777 *
3778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3779 * @param pSReg Pointer to the segment register.
3780 */
3781IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3782{
3783 pSReg->Sel = 0;
3784 pSReg->ValidSel = 0;
3785 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3786 {
3787         /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3788 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3789 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3790 }
3791 else
3792 {
3793 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3794 /** @todo check this on AMD-V */
3795 pSReg->u64Base = 0;
3796 pSReg->u32Limit = 0;
3797 }
3798}
3799
3800
3801/**
3802 * Loads a segment selector during a task switch in V8086 mode.
3803 *
3804 * @param pSReg Pointer to the segment register.
3805 * @param uSel The selector value to load.
3806 */
3807IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3808{
3809 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3810 pSReg->Sel = uSel;
3811 pSReg->ValidSel = uSel;
3812 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3813 pSReg->u64Base = uSel << 4;
3814 pSReg->u32Limit = 0xffff;
3815 pSReg->Attr.u = 0xf3;
3816}
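
/* Descriptive note: this mirrors how the CPU treats segment registers in
 * virtual-8086 mode; e.g. loading uSel=0x1234 yields base 0x12340, a 64KB
 * limit (0xffff) and the fixed attributes 0xf3 (present, DPL=3, read/write
 * accessed data). */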
3817
3818
3819/**
3820 * Loads a NULL data selector into a selector register, both the hidden and
3821 * visible parts, in protected mode.
3822 *
3823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3824 * @param pSReg Pointer to the segment register.
3825 * @param uRpl The RPL.
3826 */
3827IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3828{
3829    /** @todo Testcase: write a testcase checking what happens when loading a NULL
3830 * data selector in protected mode. */
3831 pSReg->Sel = uRpl;
3832 pSReg->ValidSel = uRpl;
3833 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3834 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3835 {
3836 /* VT-x (Intel 3960x) observed doing something like this. */
3837 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3838 pSReg->u32Limit = UINT32_MAX;
3839 pSReg->u64Base = 0;
3840 }
3841 else
3842 {
3843 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3844 pSReg->u32Limit = 0;
3845 pSReg->u64Base = 0;
3846 }
3847}
3848
3849
3850/**
3851 * Loads a segment selector during a task switch in protected mode.
3852 *
3853 * In this task switch scenario, we would throw \#TS exceptions rather than
3854 * \#GPs.
3855 *
3856 * @returns VBox strict status code.
3857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3858 * @param pSReg Pointer to the segment register.
3859 * @param uSel The new selector value.
3860 *
3861 * @remarks This does _not_ handle CS or SS.
3862 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3863 */
3864IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3865{
3866 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3867
3868 /* Null data selector. */
3869 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3870 {
3871 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3872 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3873 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3874 return VINF_SUCCESS;
3875 }
3876
3877 /* Fetch the descriptor. */
3878 IEMSELDESC Desc;
3879 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3880 if (rcStrict != VINF_SUCCESS)
3881 {
3882 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3883 VBOXSTRICTRC_VAL(rcStrict)));
3884 return rcStrict;
3885 }
3886
3887 /* Must be a data segment or readable code segment. */
3888 if ( !Desc.Legacy.Gen.u1DescType
3889 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3890 {
3891 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3892 Desc.Legacy.Gen.u4Type));
3893 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3894 }
3895
3896 /* Check privileges for data segments and non-conforming code segments. */
3897 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3898 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3899 {
3900 /* The RPL and the new CPL must be less than or equal to the DPL. */
3901 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3902 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3903 {
3904 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3905 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3906 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3907 }
3908 }
3909
3910 /* Is it there? */
3911 if (!Desc.Legacy.Gen.u1Present)
3912 {
3913 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3914 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3915 }
3916
3917 /* The base and limit. */
3918 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3919 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3920
3921 /*
3922 * Ok, everything checked out fine. Now set the accessed bit before
3923 * committing the result into the registers.
3924 */
3925 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3926 {
3927 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3928 if (rcStrict != VINF_SUCCESS)
3929 return rcStrict;
3930 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3931 }
3932
3933 /* Commit */
3934 pSReg->Sel = uSel;
3935 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3936 pSReg->u32Limit = cbLimit;
3937 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3938 pSReg->ValidSel = uSel;
3939 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3940 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3941 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3942
3943 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3944 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3945 return VINF_SUCCESS;
3946}
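
/* Illustrative sketch (not part of the build): iemTaskSwitch below uses this
 * helper once per data segment register (ES/DS/FS/GS), while CS and SS get
 * their own CPL/RPL checks:
 *
 *      rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */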
3947
3948
3949/**
3950 * Performs a task switch.
3951 *
3952 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3953 * caller is responsible for performing the necessary checks (like DPL, TSS
3954 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3955 * reference for JMP, CALL, IRET.
3956 *
3957 * If the task switch is due to a software interrupt or hardware exception,
3958 * the caller is responsible for validating the TSS selector and descriptor. See
3959 * Intel Instruction reference for INT n.
3960 *
3961 * @returns VBox strict status code.
3962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3963 * @param pCtx The CPU context.
3964 * @param enmTaskSwitch What caused this task switch.
3965 * @param uNextEip The EIP effective after the task switch.
3966 * @param fFlags The flags.
3967 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3968 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3969 * @param SelTSS The TSS selector of the new task.
3970 * @param pNewDescTSS Pointer to the new TSS descriptor.
3971 */
3972IEM_STATIC VBOXSTRICTRC
3973iemTaskSwitch(PVMCPU pVCpu,
3974 PCPUMCTX pCtx,
3975 IEMTASKSWITCH enmTaskSwitch,
3976 uint32_t uNextEip,
3977 uint32_t fFlags,
3978 uint16_t uErr,
3979 uint64_t uCr2,
3980 RTSEL SelTSS,
3981 PIEMSELDESC pNewDescTSS)
3982{
3983 Assert(!IEM_IS_REAL_MODE(pVCpu));
3984 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3985
3986 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3987 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3988 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3989 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3990 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3991
3992 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3993 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3994
3995 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3996 fIsNewTSS386, pCtx->eip, uNextEip));
3997
3998 /* Update CR2 in case it's a page-fault. */
3999 /** @todo This should probably be done much earlier in IEM/PGM. See
4000 * @bugref{5653#c49}. */
4001 if (fFlags & IEM_XCPT_FLAGS_CR2)
4002 pCtx->cr2 = uCr2;
4003
4004 /*
4005 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4006 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4007 */
4008 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4009 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4010 if (uNewTSSLimit < uNewTSSLimitMin)
4011 {
4012 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4013 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4014 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4015 }
4016
4017 /*
4018 * Check the current TSS limit. The last written byte to the current TSS during the
4019 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4020 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4021 *
4022     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4023 * end up with smaller than "legal" TSS limits.
4024 */
4025 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
4026 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4027 if (uCurTSSLimit < uCurTSSLimitMin)
4028 {
4029 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4030 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4031 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4032 }
4033
4034 /*
4035 * Verify that the new TSS can be accessed and map it. Map only the required contents
4036 * and not the entire TSS.
4037 */
4038 void *pvNewTSS;
4039 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4040 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4041 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4042 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4043 * not perform correct translation if this happens. See Intel spec. 7.2.1
4044 * "Task-State Segment" */
4045 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4046 if (rcStrict != VINF_SUCCESS)
4047 {
4048 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4049 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4050 return rcStrict;
4051 }
4052
4053 /*
4054 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4055 */
4056 uint32_t u32EFlags = pCtx->eflags.u32;
4057 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4058 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4059 {
4060 PX86DESC pDescCurTSS;
4061 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4062 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4063 if (rcStrict != VINF_SUCCESS)
4064 {
4065             Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4066 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4067 return rcStrict;
4068 }
4069
4070 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4071 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4072 if (rcStrict != VINF_SUCCESS)
4073 {
4074             Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4075 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4076 return rcStrict;
4077 }
4078
4079 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4080 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4081 {
4082 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4083 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4084 u32EFlags &= ~X86_EFL_NT;
4085 }
4086 }
4087
4088 /*
4089 * Save the CPU state into the current TSS.
4090 */
4091 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4092 if (GCPtrNewTSS == GCPtrCurTSS)
4093 {
4094 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4095 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4096 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4097 }
4098 if (fIsNewTSS386)
4099 {
4100 /*
4101 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4102 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4103 */
4104 void *pvCurTSS32;
4105 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4106 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4107 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4108 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4109 if (rcStrict != VINF_SUCCESS)
4110 {
4111 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4112 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4113 return rcStrict;
4114 }
4115
4116         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4117 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4118 pCurTSS32->eip = uNextEip;
4119 pCurTSS32->eflags = u32EFlags;
4120 pCurTSS32->eax = pCtx->eax;
4121 pCurTSS32->ecx = pCtx->ecx;
4122 pCurTSS32->edx = pCtx->edx;
4123 pCurTSS32->ebx = pCtx->ebx;
4124 pCurTSS32->esp = pCtx->esp;
4125 pCurTSS32->ebp = pCtx->ebp;
4126 pCurTSS32->esi = pCtx->esi;
4127 pCurTSS32->edi = pCtx->edi;
4128 pCurTSS32->es = pCtx->es.Sel;
4129 pCurTSS32->cs = pCtx->cs.Sel;
4130 pCurTSS32->ss = pCtx->ss.Sel;
4131 pCurTSS32->ds = pCtx->ds.Sel;
4132 pCurTSS32->fs = pCtx->fs.Sel;
4133 pCurTSS32->gs = pCtx->gs.Sel;
4134
4135 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4136 if (rcStrict != VINF_SUCCESS)
4137 {
4138 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4139 VBOXSTRICTRC_VAL(rcStrict)));
4140 return rcStrict;
4141 }
4142 }
4143 else
4144 {
4145 /*
4146 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4147 */
4148 void *pvCurTSS16;
4149 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4150 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4151 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4152 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4153 if (rcStrict != VINF_SUCCESS)
4154 {
4155 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4156 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4157 return rcStrict;
4158 }
4159
4160         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4161 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4162 pCurTSS16->ip = uNextEip;
4163 pCurTSS16->flags = u32EFlags;
4164 pCurTSS16->ax = pCtx->ax;
4165 pCurTSS16->cx = pCtx->cx;
4166 pCurTSS16->dx = pCtx->dx;
4167 pCurTSS16->bx = pCtx->bx;
4168 pCurTSS16->sp = pCtx->sp;
4169 pCurTSS16->bp = pCtx->bp;
4170 pCurTSS16->si = pCtx->si;
4171 pCurTSS16->di = pCtx->di;
4172 pCurTSS16->es = pCtx->es.Sel;
4173 pCurTSS16->cs = pCtx->cs.Sel;
4174 pCurTSS16->ss = pCtx->ss.Sel;
4175 pCurTSS16->ds = pCtx->ds.Sel;
4176
4177 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4178 if (rcStrict != VINF_SUCCESS)
4179 {
4180 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4181 VBOXSTRICTRC_VAL(rcStrict)));
4182 return rcStrict;
4183 }
4184 }
4185
4186 /*
4187 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4188 */
4189 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4190 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4191 {
4192 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4193 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4194 pNewTSS->selPrev = pCtx->tr.Sel;
4195 }
4196
4197 /*
4198 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4199 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4200 */
4201 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4202 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4203 bool fNewDebugTrap;
4204 if (fIsNewTSS386)
4205 {
4206 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4207 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4208 uNewEip = pNewTSS32->eip;
4209 uNewEflags = pNewTSS32->eflags;
4210 uNewEax = pNewTSS32->eax;
4211 uNewEcx = pNewTSS32->ecx;
4212 uNewEdx = pNewTSS32->edx;
4213 uNewEbx = pNewTSS32->ebx;
4214 uNewEsp = pNewTSS32->esp;
4215 uNewEbp = pNewTSS32->ebp;
4216 uNewEsi = pNewTSS32->esi;
4217 uNewEdi = pNewTSS32->edi;
4218 uNewES = pNewTSS32->es;
4219 uNewCS = pNewTSS32->cs;
4220 uNewSS = pNewTSS32->ss;
4221 uNewDS = pNewTSS32->ds;
4222 uNewFS = pNewTSS32->fs;
4223 uNewGS = pNewTSS32->gs;
4224 uNewLdt = pNewTSS32->selLdt;
4225 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4226 }
4227 else
4228 {
4229 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4230 uNewCr3 = 0;
4231 uNewEip = pNewTSS16->ip;
4232 uNewEflags = pNewTSS16->flags;
4233 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4234 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4235 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4236 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4237 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4238 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4239 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4240 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4241 uNewES = pNewTSS16->es;
4242 uNewCS = pNewTSS16->cs;
4243 uNewSS = pNewTSS16->ss;
4244 uNewDS = pNewTSS16->ds;
4245 uNewFS = 0;
4246 uNewGS = 0;
4247 uNewLdt = pNewTSS16->selLdt;
4248 fNewDebugTrap = false;
4249 }
4250
4251 if (GCPtrNewTSS == GCPtrCurTSS)
4252 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4253 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4254
4255 /*
4256 * We're done accessing the new TSS.
4257 */
4258 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4259 if (rcStrict != VINF_SUCCESS)
4260 {
4261 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4262 return rcStrict;
4263 }
4264
4265 /*
4266 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4267 */
4268 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4269 {
4270 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4271 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4272 if (rcStrict != VINF_SUCCESS)
4273 {
4274 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4275 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4276 return rcStrict;
4277 }
4278
4279 /* Check that the descriptor indicates the new TSS is available (not busy). */
4280 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4281 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4282 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4283
4284 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4285 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4286 if (rcStrict != VINF_SUCCESS)
4287 {
4288 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4289 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4290 return rcStrict;
4291 }
4292 }
4293
4294 /*
4295 * From this point on, we're technically in the new task. We will defer exceptions
4296 * until the completion of the task switch but before executing any instructions in the new task.
4297 */
4298 pCtx->tr.Sel = SelTSS;
4299 pCtx->tr.ValidSel = SelTSS;
4300 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4301 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4302 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4303 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4304 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4305
4306 /* Set the busy bit in TR. */
4307 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4308 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4309 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4310 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4311 {
4312 uNewEflags |= X86_EFL_NT;
4313 }
4314
4315 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4316 pCtx->cr0 |= X86_CR0_TS;
4317 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4318
4319 pCtx->eip = uNewEip;
4320 pCtx->eax = uNewEax;
4321 pCtx->ecx = uNewEcx;
4322 pCtx->edx = uNewEdx;
4323 pCtx->ebx = uNewEbx;
4324 pCtx->esp = uNewEsp;
4325 pCtx->ebp = uNewEbp;
4326 pCtx->esi = uNewEsi;
4327 pCtx->edi = uNewEdi;
4328
4329 uNewEflags &= X86_EFL_LIVE_MASK;
4330 uNewEflags |= X86_EFL_RA1_MASK;
4331 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4332
4333 /*
4334 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4335 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4336 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4337 */
4338 pCtx->es.Sel = uNewES;
4339 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4340
4341 pCtx->cs.Sel = uNewCS;
4342 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4343
4344 pCtx->ss.Sel = uNewSS;
4345 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4346
4347 pCtx->ds.Sel = uNewDS;
4348 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4349
4350 pCtx->fs.Sel = uNewFS;
4351 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4352
4353 pCtx->gs.Sel = uNewGS;
4354 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4355 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4356
4357 pCtx->ldtr.Sel = uNewLdt;
4358 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4359 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4360 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4361
4362 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4363 {
4364 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4365 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4366 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4367 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4368 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4369 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4370 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4371 }
4372
4373 /*
4374 * Switch CR3 for the new task.
4375 */
4376 if ( fIsNewTSS386
4377 && (pCtx->cr0 & X86_CR0_PG))
4378 {
4379 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4380 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4381 {
4382 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4383 AssertRCSuccessReturn(rc, rc);
4384 }
4385 else
4386 pCtx->cr3 = uNewCr3;
4387
4388 /* Inform PGM. */
4389 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4390 {
4391 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4392 AssertRCReturn(rc, rc);
4393 /* ignore informational status codes */
4394 }
4395 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4396 }
4397
4398 /*
4399 * Switch LDTR for the new task.
4400 */
4401 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4402 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4403 else
4404 {
4405 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4406
4407 IEMSELDESC DescNewLdt;
4408 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4409 if (rcStrict != VINF_SUCCESS)
4410 {
4411 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4412 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4413 return rcStrict;
4414 }
4415 if ( !DescNewLdt.Legacy.Gen.u1Present
4416 || DescNewLdt.Legacy.Gen.u1DescType
4417 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4418 {
4419 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4420 uNewLdt, DescNewLdt.Legacy.u));
4421 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4422 }
4423
4424 pCtx->ldtr.ValidSel = uNewLdt;
4425 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4426 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4427 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4428 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4429 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4430 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4431 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4432 }
4433
4434 IEMSELDESC DescSS;
4435 if (IEM_IS_V86_MODE(pVCpu))
4436 {
4437 pVCpu->iem.s.uCpl = 3;
4438 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4439 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4440 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4441 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4442 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4443 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4444
4445 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4446 DescSS.Legacy.u = 0;
4447 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4448 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4449 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4450 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4451 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4452 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4453 DescSS.Legacy.Gen.u2Dpl = 3;
4454 }
4455 else
4456 {
4457 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4458
4459 /*
4460 * Load the stack segment for the new task.
4461 */
4462 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4463 {
4464 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4465 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4466 }
4467
4468 /* Fetch the descriptor. */
4469 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4470 if (rcStrict != VINF_SUCCESS)
4471 {
4472 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4473 VBOXSTRICTRC_VAL(rcStrict)));
4474 return rcStrict;
4475 }
4476
4477 /* SS must be a data segment and writable. */
4478 if ( !DescSS.Legacy.Gen.u1DescType
4479 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4480 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4481 {
4482 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4483 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4484 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4485 }
4486
4487 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4488 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4489 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4490 {
4491 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4492 uNewCpl));
4493 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4494 }
4495
4496 /* Is it there? */
4497 if (!DescSS.Legacy.Gen.u1Present)
4498 {
4499 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4500 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4501 }
4502
4503 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4504 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4505
4506 /* Set the accessed bit before committing the result into SS. */
4507 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4508 {
4509 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4510 if (rcStrict != VINF_SUCCESS)
4511 return rcStrict;
4512 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4513 }
4514
4515 /* Commit SS. */
4516 pCtx->ss.Sel = uNewSS;
4517 pCtx->ss.ValidSel = uNewSS;
4518 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4519 pCtx->ss.u32Limit = cbLimit;
4520 pCtx->ss.u64Base = u64Base;
4521 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4522 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4523
4524 /* CPL has changed, update IEM before loading rest of segments. */
4525 pVCpu->iem.s.uCpl = uNewCpl;
4526
4527 /*
4528 * Load the data segments for the new task.
4529 */
4530 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4531 if (rcStrict != VINF_SUCCESS)
4532 return rcStrict;
4533 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4534 if (rcStrict != VINF_SUCCESS)
4535 return rcStrict;
4536 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4537 if (rcStrict != VINF_SUCCESS)
4538 return rcStrict;
4539 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4540 if (rcStrict != VINF_SUCCESS)
4541 return rcStrict;
4542
4543 /*
4544 * Load the code segment for the new task.
4545 */
4546 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4547 {
4548 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4549 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4550 }
4551
4552 /* Fetch the descriptor. */
4553 IEMSELDESC DescCS;
4554 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4555 if (rcStrict != VINF_SUCCESS)
4556 {
4557 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4558 return rcStrict;
4559 }
4560
4561 /* CS must be a code segment. */
4562 if ( !DescCS.Legacy.Gen.u1DescType
4563 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4564 {
4565 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4566 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4567 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4568 }
4569
4570 /* For conforming CS, DPL must be less than or equal to the RPL. */
4571 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4572 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4573 {
4574             Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4575 DescCS.Legacy.Gen.u2Dpl));
4576 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4577 }
4578
4579 /* For non-conforming CS, DPL must match RPL. */
4580 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4581 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4582 {
4583             Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4584 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4585 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4586 }
4587
4588 /* Is it there? */
4589 if (!DescCS.Legacy.Gen.u1Present)
4590 {
4591 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4592 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4593 }
4594
4595 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4596 u64Base = X86DESC_BASE(&DescCS.Legacy);
4597
4598 /* Set the accessed bit before committing the result into CS. */
4599 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4600 {
4601 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4602 if (rcStrict != VINF_SUCCESS)
4603 return rcStrict;
4604 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4605 }
4606
4607 /* Commit CS. */
4608 pCtx->cs.Sel = uNewCS;
4609 pCtx->cs.ValidSel = uNewCS;
4610 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4611 pCtx->cs.u32Limit = cbLimit;
4612 pCtx->cs.u64Base = u64Base;
4613 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4614 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4615 }
4616
4617 /** @todo Debug trap. */
4618 if (fIsNewTSS386 && fNewDebugTrap)
4619 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4620
4621 /*
4622 * Construct the error code masks based on what caused this task switch.
4623 * See Intel Instruction reference for INT.
4624 */
4625 uint16_t uExt;
4626 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4627 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4628 {
4629 uExt = 1;
4630 }
4631 else
4632 uExt = 0;
4633
4634 /*
4635 * Push any error code on to the new stack.
4636 */
4637 if (fFlags & IEM_XCPT_FLAGS_ERR)
4638 {
4639 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4640 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4641 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4642
4643 /* Check that there is sufficient space on the stack. */
4644 /** @todo Factor out segment limit checking for normal/expand down segments
4645 * into a separate function. */
4646 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4647 {
4648 if ( pCtx->esp - 1 > cbLimitSS
4649 || pCtx->esp < cbStackFrame)
4650 {
4651 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4652 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4653 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4654 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4655 }
4656 }
4657 else
4658 {
4659 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4660 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4661 {
4662 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4663 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4664 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4665 }
4666 }
4667
4668
4669 if (fIsNewTSS386)
4670 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4671 else
4672 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4673 if (rcStrict != VINF_SUCCESS)
4674 {
4675 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4676 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4677 return rcStrict;
4678 }
4679 }
4680
4681 /* Check the new EIP against the new CS limit. */
4682 if (pCtx->eip > pCtx->cs.u32Limit)
4683 {
4684 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4685 pCtx->eip, pCtx->cs.u32Limit));
4686 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4687 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4688 }
4689
4690 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4691 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4692}
4693
4694
4695/**
4696 * Implements exceptions and interrupts for protected mode.
4697 *
4698 * @returns VBox strict status code.
4699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4700 * @param pCtx The CPU context.
4701 * @param cbInstr The number of bytes to offset rIP by in the return
4702 * address.
4703 * @param u8Vector The interrupt / exception vector number.
4704 * @param fFlags The flags.
4705 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4706 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4707 */
4708IEM_STATIC VBOXSTRICTRC
4709iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4710 PCPUMCTX pCtx,
4711 uint8_t cbInstr,
4712 uint8_t u8Vector,
4713 uint32_t fFlags,
4714 uint16_t uErr,
4715 uint64_t uCr2)
4716{
4717 /*
4718 * Read the IDT entry.
4719 */
4720 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4721 {
4722 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4723 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4724 }
4725 X86DESC Idte;
4726 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4727 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4728 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4729 return rcStrict;
4730 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4731 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4732 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4733
4734 /*
4735 * Check the descriptor type, DPL and such.
4736 * ASSUMES this is done in the same order as described for call-gate calls.
4737 */
4738 if (Idte.Gate.u1DescType)
4739 {
4740 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4741 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4742 }
4743 bool fTaskGate = false;
4744 uint8_t f32BitGate = true;
4745 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4746 switch (Idte.Gate.u4Type)
4747 {
4748 case X86_SEL_TYPE_SYS_UNDEFINED:
4749 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4750 case X86_SEL_TYPE_SYS_LDT:
4751 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4752 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4753 case X86_SEL_TYPE_SYS_UNDEFINED2:
4754 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4755 case X86_SEL_TYPE_SYS_UNDEFINED3:
4756 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4757 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4758 case X86_SEL_TYPE_SYS_UNDEFINED4:
4759 {
4760 /** @todo check what actually happens when the type is wrong...
4761 * esp. call gates. */
4762 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4763 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4764 }
4765
4766 case X86_SEL_TYPE_SYS_286_INT_GATE:
4767 f32BitGate = false;
4768 /* fall thru */
4769 case X86_SEL_TYPE_SYS_386_INT_GATE:
4770 fEflToClear |= X86_EFL_IF;
4771 break;
4772
4773 case X86_SEL_TYPE_SYS_TASK_GATE:
4774 fTaskGate = true;
4775#ifndef IEM_IMPLEMENTS_TASKSWITCH
4776 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4777#endif
4778 break;
4779
4780 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4781 f32BitGate = false; /* fall thru */
4782 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4783 break;
4784
4785 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4786 }
4787
4788 /* Check DPL against CPL if applicable. */
4789 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4790 {
4791 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4792 {
4793 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4794 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4795 }
4796 }
4797
4798 /* Is it there? */
4799 if (!Idte.Gate.u1Present)
4800 {
4801 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4802 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4803 }
4804
4805 /* Is it a task-gate? */
4806 if (fTaskGate)
4807 {
4808 /*
4809 * Construct the error code masks based on what caused this task switch.
4810 * See Intel Instruction reference for INT.
4811 */
4812 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4813 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4814 RTSEL SelTSS = Idte.Gate.u16Sel;
4815
4816 /*
4817 * Fetch the TSS descriptor in the GDT.
4818 */
4819 IEMSELDESC DescTSS;
4820 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4821 if (rcStrict != VINF_SUCCESS)
4822 {
4823 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4824 VBOXSTRICTRC_VAL(rcStrict)));
4825 return rcStrict;
4826 }
4827
4828 /* The TSS descriptor must be a system segment and be available (not busy). */
4829 if ( DescTSS.Legacy.Gen.u1DescType
4830 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4831 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4832 {
4833 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4834 u8Vector, SelTSS, DescTSS.Legacy.au64));
4835 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4836 }
4837
4838 /* The TSS must be present. */
4839 if (!DescTSS.Legacy.Gen.u1Present)
4840 {
4841 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4842 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4843 }
4844
4845 /* Do the actual task switch. */
4846 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4847 }
4848
4849 /* A null CS is bad. */
4850 RTSEL NewCS = Idte.Gate.u16Sel;
4851 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4852 {
4853 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4854 return iemRaiseGeneralProtectionFault0(pVCpu);
4855 }
4856
4857 /* Fetch the descriptor for the new CS. */
4858 IEMSELDESC DescCS;
4859 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4860 if (rcStrict != VINF_SUCCESS)
4861 {
4862 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4863 return rcStrict;
4864 }
4865
4866 /* Must be a code segment. */
4867 if (!DescCS.Legacy.Gen.u1DescType)
4868 {
4869 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4870 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4871 }
4872 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4873 {
4874 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4875 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4876 }
4877
4878 /* Don't allow lowering the privilege level. */
4879 /** @todo Does the lowering of privileges apply to software interrupts
4880 * only? This has bearings on the more-privileged or
4881 * same-privilege stack behavior further down. A testcase would
4882 * be nice. */
4883 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4884 {
4885 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4886 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4887 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4888 }
4889
4890 /* Make sure the selector is present. */
4891 if (!DescCS.Legacy.Gen.u1Present)
4892 {
4893 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4894 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4895 }
4896
4897 /* Check the new EIP against the new CS limit. */
4898 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4899 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4900 ? Idte.Gate.u16OffsetLow
4901 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4902 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4903 if (uNewEip > cbLimitCS)
4904 {
4905 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4906 u8Vector, uNewEip, cbLimitCS, NewCS));
4907 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4908 }
4909 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4910
4911 /* Calc the flag image to push. */
4912 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4913 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4914 fEfl &= ~X86_EFL_RF;
4915 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4916 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4917
4918 /* From V8086 mode only go to CPL 0. */
4919 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4920 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4921 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4922 {
4923 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4924 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4925 }
4926
4927 /*
4928 * If the privilege level changes, we need to get a new stack from the TSS.
4929 * This in turn means validating the new SS and ESP...
4930 */
4931 if (uNewCpl != pVCpu->iem.s.uCpl)
4932 {
4933 RTSEL NewSS;
4934 uint32_t uNewEsp;
4935 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4936 if (rcStrict != VINF_SUCCESS)
4937 return rcStrict;
4938
4939 IEMSELDESC DescSS;
4940 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4941 if (rcStrict != VINF_SUCCESS)
4942 return rcStrict;
4943 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4944 if (!DescSS.Legacy.Gen.u1DefBig)
4945 {
4946 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4947 uNewEsp = (uint16_t)uNewEsp;
4948 }
4949
4950 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4951
4952 /* Check that there is sufficient space for the stack frame. */
4953 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4954 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4955 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4956 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
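 /* (Sketch of the frame being sized here, matching the pushes further down:
    [uErr] EIP CS EFLAGS ESP SS, plus ES DS FS GS when coming from V8086 mode;
    entries are words for 16-bit gates and dwords for 32-bit gates, hence the
    shift by f32BitGate.) */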
4957
4958 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4959 {
4960 if ( uNewEsp - 1 > cbLimitSS
4961 || uNewEsp < cbStackFrame)
4962 {
4963 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4964 u8Vector, NewSS, uNewEsp, cbStackFrame));
4965 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4966 }
4967 }
4968 else
4969 {
4970 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4971 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4972 {
4973 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4974 u8Vector, NewSS, uNewEsp, cbStackFrame));
4975 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4976 }
4977 }
4978
4979 /*
4980 * Start making changes.
4981 */
4982
4983 /* Set the new CPL so that stack accesses use it. */
4984 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4985 pVCpu->iem.s.uCpl = uNewCpl;
4986
4987 /* Create the stack frame. */
4988 RTPTRUNION uStackFrame;
4989 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4990 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4991 if (rcStrict != VINF_SUCCESS)
4992 return rcStrict;
4993 void * const pvStackFrame = uStackFrame.pv;
4994 if (f32BitGate)
4995 {
4996 if (fFlags & IEM_XCPT_FLAGS_ERR)
4997 *uStackFrame.pu32++ = uErr;
4998 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4999 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5000 uStackFrame.pu32[2] = fEfl;
5001 uStackFrame.pu32[3] = pCtx->esp;
5002 uStackFrame.pu32[4] = pCtx->ss.Sel;
5003 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
5004 if (fEfl & X86_EFL_VM)
5005 {
5006 uStackFrame.pu32[1] = pCtx->cs.Sel;
5007 uStackFrame.pu32[5] = pCtx->es.Sel;
5008 uStackFrame.pu32[6] = pCtx->ds.Sel;
5009 uStackFrame.pu32[7] = pCtx->fs.Sel;
5010 uStackFrame.pu32[8] = pCtx->gs.Sel;
5011 }
5012 }
5013 else
5014 {
5015 if (fFlags & IEM_XCPT_FLAGS_ERR)
5016 *uStackFrame.pu16++ = uErr;
5017 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
5018 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5019 uStackFrame.pu16[2] = fEfl;
5020 uStackFrame.pu16[3] = pCtx->sp;
5021 uStackFrame.pu16[4] = pCtx->ss.Sel;
5022 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
5023 if (fEfl & X86_EFL_VM)
5024 {
5025 uStackFrame.pu16[1] = pCtx->cs.Sel;
5026 uStackFrame.pu16[5] = pCtx->es.Sel;
5027 uStackFrame.pu16[6] = pCtx->ds.Sel;
5028 uStackFrame.pu16[7] = pCtx->fs.Sel;
5029 uStackFrame.pu16[8] = pCtx->gs.Sel;
5030 }
5031 }
5032 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5033 if (rcStrict != VINF_SUCCESS)
5034 return rcStrict;
5035
5036 /* Mark the selectors 'accessed' (hope this is the correct time). */
5037 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5038 * after pushing the stack frame? (Write protect the gdt + stack to
5039 * find out.) */
5040 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5041 {
5042 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5043 if (rcStrict != VINF_SUCCESS)
5044 return rcStrict;
5045 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5046 }
5047
5048 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5049 {
5050 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5051 if (rcStrict != VINF_SUCCESS)
5052 return rcStrict;
5053 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5054 }
5055
5056 /*
5057 * Start committing the register changes (joins with the DPL=CPL branch).
5058 */
5059 pCtx->ss.Sel = NewSS;
5060 pCtx->ss.ValidSel = NewSS;
5061 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5062 pCtx->ss.u32Limit = cbLimitSS;
5063 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5064 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5065 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5066 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5067 * SP is loaded).
5068 * Need to check the other combinations too:
5069 * - 16-bit TSS, 32-bit handler
5070 * - 32-bit TSS, 16-bit handler */
5071 if (!pCtx->ss.Attr.n.u1DefBig)
5072 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
5073 else
5074 pCtx->rsp = uNewEsp - cbStackFrame;
5075
5076 if (fEfl & X86_EFL_VM)
5077 {
5078 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
5079 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5080 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5081 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5082 }
5083 }
5084 /*
5085 * Same privilege, no stack change and smaller stack frame.
5086 */
5087 else
5088 {
5089 uint64_t uNewRsp;
5090 RTPTRUNION uStackFrame;
5091 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
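 /* (Same-privilege frame: just [uErr] EIP CS EFLAGS, as pushed below; word-sized
    entries for 16-bit gates, dword-sized for 32-bit gates.) */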
5092 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5093 if (rcStrict != VINF_SUCCESS)
5094 return rcStrict;
5095 void * const pvStackFrame = uStackFrame.pv;
5096
5097 if (f32BitGate)
5098 {
5099 if (fFlags & IEM_XCPT_FLAGS_ERR)
5100 *uStackFrame.pu32++ = uErr;
5101 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5102 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5103 uStackFrame.pu32[2] = fEfl;
5104 }
5105 else
5106 {
5107 if (fFlags & IEM_XCPT_FLAGS_ERR)
5108 *uStackFrame.pu16++ = uErr;
5109 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5110 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5111 uStackFrame.pu16[2] = fEfl;
5112 }
5113 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5114 if (rcStrict != VINF_SUCCESS)
5115 return rcStrict;
5116
5117 /* Mark the CS selector as 'accessed'. */
5118 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5119 {
5120 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5121 if (rcStrict != VINF_SUCCESS)
5122 return rcStrict;
5123 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5124 }
5125
5126 /*
5127 * Start committing the register changes (joins with the other branch).
5128 */
5129 pCtx->rsp = uNewRsp;
5130 }
5131
5132 /* ... register committing continues. */
5133 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5134 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5135 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5136 pCtx->cs.u32Limit = cbLimitCS;
5137 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5138 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5139
5140 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5141 fEfl &= ~fEflToClear;
5142 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5143
5144 if (fFlags & IEM_XCPT_FLAGS_CR2)
5145 pCtx->cr2 = uCr2;
5146
5147 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5148 iemRaiseXcptAdjustState(pCtx, u8Vector);
5149
5150 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5151}
5152
5153
5154/**
5155 * Implements exceptions and interrupts for long mode.
5156 *
5157 * @returns VBox strict status code.
5158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5159 * @param pCtx The CPU context.
5160 * @param cbInstr The number of bytes to offset rIP by in the return
5161 * address.
5162 * @param u8Vector The interrupt / exception vector number.
5163 * @param fFlags The flags.
5164 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5165 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5166 */
5167IEM_STATIC VBOXSTRICTRC
5168iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5169 PCPUMCTX pCtx,
5170 uint8_t cbInstr,
5171 uint8_t u8Vector,
5172 uint32_t fFlags,
5173 uint16_t uErr,
5174 uint64_t uCr2)
5175{
5176 /*
5177 * Read the IDT entry.
5178 */
5179 uint16_t offIdt = (uint16_t)u8Vector << 4;
5180 if (pCtx->idtr.cbIdt < offIdt + 7)
5181 {
5182 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5183 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5184 }
5185 X86DESC64 Idte;
5186 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5187 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5188 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5189 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5190 return rcStrict;
5191 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5192 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5193 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5194
5195 /*
5196 * Check the descriptor type, DPL and such.
5197 * ASSUMES this is done in the same order as described for call-gate calls.
5198 */
5199 if (Idte.Gate.u1DescType)
5200 {
5201 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5202 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5203 }
5204 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5205 switch (Idte.Gate.u4Type)
5206 {
5207 case AMD64_SEL_TYPE_SYS_INT_GATE:
5208 fEflToClear |= X86_EFL_IF;
5209 break;
5210 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5211 break;
5212
5213 default:
5214 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5215 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5216 }
5217
5218 /* Check DPL against CPL if applicable. */
5219 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5220 {
5221 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5222 {
5223 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5224 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5225 }
5226 }
5227
5228 /* Is it there? */
5229 if (!Idte.Gate.u1Present)
5230 {
5231 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5232 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5233 }
5234
5235 /* A null CS is bad. */
5236 RTSEL NewCS = Idte.Gate.u16Sel;
5237 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5238 {
5239 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5240 return iemRaiseGeneralProtectionFault0(pVCpu);
5241 }
5242
5243 /* Fetch the descriptor for the new CS. */
5244 IEMSELDESC DescCS;
5245 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5246 if (rcStrict != VINF_SUCCESS)
5247 {
5248 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5249 return rcStrict;
5250 }
5251
5252 /* Must be a 64-bit code segment. */
5253 if (!DescCS.Long.Gen.u1DescType)
5254 {
5255 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5256 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5257 }
5258 if ( !DescCS.Long.Gen.u1Long
5259 || DescCS.Long.Gen.u1DefBig
5260 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5261 {
5262 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5263 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5264 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5265 }
5266
5267 /* Don't allow lowering the privilege level. For non-conforming CS
5268 selectors, the CS.DPL sets the privilege level the trap/interrupt
5269 handler runs at. For conforming CS selectors, the CPL remains
5270 unchanged, but the CS.DPL must be <= CPL. */
5271 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5272 * when CPU in Ring-0. Result \#GP? */
5273 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5274 {
5275 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5276 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5277 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5278 }
5279
5280
5281 /* Make sure the selector is present. */
5282 if (!DescCS.Legacy.Gen.u1Present)
5283 {
5284 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5285 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5286 }
5287
5288 /* Check that the new RIP is canonical. */
5289 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5290 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5291 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5292 if (!IEM_IS_CANONICAL(uNewRip))
5293 {
5294 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5295 return iemRaiseGeneralProtectionFault0(pVCpu);
5296 }
5297
5298 /*
5299 * If the privilege level changes or if the IST isn't zero, we need to get
5300 * a new stack from the TSS.
5301 */
5302 uint64_t uNewRsp;
5303 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5304 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5305 if ( uNewCpl != pVCpu->iem.s.uCpl
5306 || Idte.Gate.u3IST != 0)
5307 {
5308 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5309 if (rcStrict != VINF_SUCCESS)
5310 return rcStrict;
5311 }
5312 else
5313 uNewRsp = pCtx->rsp;
5314 uNewRsp &= ~(uint64_t)0xf;
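 /* (The masking above aligns the new RSP down to a 16-byte boundary before the
    frame is pushed, matching how 64-bit mode interrupt delivery aligns the stack.) */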
5315
5316 /*
5317 * Calc the flag image to push.
5318 */
5319 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5320 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5321 fEfl &= ~X86_EFL_RF;
5322 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5323 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5324
5325 /*
5326 * Start making changes.
5327 */
5328 /* Set the new CPL so that stack accesses use it. */
5329 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5330 pVCpu->iem.s.uCpl = uNewCpl;
5331
5332 /* Create the stack frame. */
5333 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
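 /* (Five qwords -- RIP, CS, RFLAGS, RSP, SS -- plus an optional error code qword;
    see the stores into uStackFrame below.) */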
5334 RTPTRUNION uStackFrame;
5335 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5336 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5337 if (rcStrict != VINF_SUCCESS)
5338 return rcStrict;
5339 void * const pvStackFrame = uStackFrame.pv;
5340
5341 if (fFlags & IEM_XCPT_FLAGS_ERR)
5342 *uStackFrame.pu64++ = uErr;
5343 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5344 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5345 uStackFrame.pu64[2] = fEfl;
5346 uStackFrame.pu64[3] = pCtx->rsp;
5347 uStackFrame.pu64[4] = pCtx->ss.Sel;
5348 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5349 if (rcStrict != VINF_SUCCESS)
5350 return rcStrict;
5351
5352 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5353 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5354 * after pushing the stack frame? (Write protect the gdt + stack to
5355 * find out.) */
5356 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5357 {
5358 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5359 if (rcStrict != VINF_SUCCESS)
5360 return rcStrict;
5361 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5362 }
5363
5364 /*
5365 * Start committing the register changes.
5366 */
5367 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5368 * hidden registers when interrupting 32-bit or 16-bit code! */
5369 if (uNewCpl != uOldCpl)
5370 {
5371 pCtx->ss.Sel = 0 | uNewCpl;
5372 pCtx->ss.ValidSel = 0 | uNewCpl;
5373 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5374 pCtx->ss.u32Limit = UINT32_MAX;
5375 pCtx->ss.u64Base = 0;
5376 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5377 }
5378 pCtx->rsp = uNewRsp - cbStackFrame;
5379 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5380 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5381 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5382 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5383 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5384 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5385 pCtx->rip = uNewRip;
5386
5387 fEfl &= ~fEflToClear;
5388 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5389
5390 if (fFlags & IEM_XCPT_FLAGS_CR2)
5391 pCtx->cr2 = uCr2;
5392
5393 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5394 iemRaiseXcptAdjustState(pCtx, u8Vector);
5395
5396 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5397}
5398
5399
5400/**
5401 * Implements exceptions and interrupts.
5402 *
5403 * All exceptions and interrupts go through this function!
5404 *
5405 * @returns VBox strict status code.
5406 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5407 * @param cbInstr The number of bytes to offset rIP by in the return
5408 * address.
5409 * @param u8Vector The interrupt / exception vector number.
5410 * @param fFlags The flags.
5411 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5412 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5413 */
5414DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5415iemRaiseXcptOrInt(PVMCPU pVCpu,
5416 uint8_t cbInstr,
5417 uint8_t u8Vector,
5418 uint32_t fFlags,
5419 uint16_t uErr,
5420 uint64_t uCr2)
5421{
5422 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5423#ifdef IN_RING0
5424 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5425 AssertRCReturn(rc, rc);
5426#endif
5427
5428#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5429 /*
5430 * Flush prefetch buffer
5431 */
5432 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5433#endif
5434
5435 /*
5436 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5437 */
5438 if ( pCtx->eflags.Bits.u1VM
5439 && pCtx->eflags.Bits.u2IOPL != 3
5440 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5441 && (pCtx->cr0 & X86_CR0_PE) )
5442 {
5443 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5444 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5445 u8Vector = X86_XCPT_GP;
5446 uErr = 0;
5447 }
5448#ifdef DBGFTRACE_ENABLED
5449 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5450 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5451 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5452#endif
5453
5454#ifdef VBOX_WITH_NESTED_HWVIRT
5455 if (IEM_IS_SVM_ENABLED(pVCpu))
5456 {
5457 /*
5458 * If the event is being injected as part of VMRUN, it isn't subject to event
5459 * intercepts in the nested-guest. However, secondary exceptions that occur
5460 * during injection of any event -are- subject to exception intercepts.
5461 * See AMD spec. 15.20 "Event Injection".
5462 */
5463 if (!pCtx->hwvirt.svm.fInterceptEvents)
5464 pCtx->hwvirt.svm.fInterceptEvents = 1;
5465 else
5466 {
5467 /*
5468 * Check and handle if the event being raised is intercepted.
5469 */
5470 VBOXSTRICTRC rcStrict0 = iemHandleSvmNstGstEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5471 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5472 return rcStrict0;
5473 }
5474 }
5475#endif /* VBOX_WITH_NESTED_HWVIRT */
5476
5477 /*
5478 * Do recursion accounting.
5479 */
5480 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5481 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5482 if (pVCpu->iem.s.cXcptRecursions == 0)
5483 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5484 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5485 else
5486 {
5487 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5488 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5489 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5490
5491 if (pVCpu->iem.s.cXcptRecursions >= 3)
5492 {
5493#ifdef DEBUG_bird
5494 AssertFailed();
5495#endif
5496 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5497 }
5498
5499 /*
5500 * Evaluate the sequence of recurring events.
5501 */
5502 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5503 NULL /* pXcptRaiseInfo */);
5504 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5505 { /* likely */ }
5506 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5507 {
5508 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5509 u8Vector = X86_XCPT_DF;
5510 uErr = 0;
5511 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5512 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5513 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + X86_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5514 }
5515 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5516 {
5517 Log2(("iemRaiseXcptOrInt: raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5518 return iemInitiateCpuShutdown(pVCpu);
5519 }
5520 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5521 {
5522 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5523 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5524 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5525 return VERR_EM_GUEST_CPU_HANG;
5526 }
5527 else
5528 {
5529 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5530 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5531 return VERR_IEM_IPE_9;
5532 }
5533
5534 /*
5535 * The 'EXT' bit is set when an exception occurs during delivery of an external
5536 * event (such as an interrupt or earlier exception), see Intel spec. 6.13
5537 * "Error Code".
5538 *
5539 * For exceptions generated by software interrupts and the INTO and INT3 instructions,
5540 * the 'EXT' bit will not be set; see the Intel Instruction reference for INT n.
5541 */
5542 /** @todo Would a \#DB raised by INT1/ICEBP set the 'EXT' bit or not? Testcase... */
5543 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT))
5544 && (fFlags & IEM_XCPT_FLAGS_ERR)
5545 && u8Vector != X86_XCPT_PF
5546 && u8Vector != X86_XCPT_DF)
5547 {
5548 uErr |= X86_TRAP_ERR_EXTERNAL;
5549 }
5550 }
5551
5552 pVCpu->iem.s.cXcptRecursions++;
5553 pVCpu->iem.s.uCurXcpt = u8Vector;
5554 pVCpu->iem.s.fCurXcpt = fFlags;
5555 pVCpu->iem.s.uCurXcptErr = uErr;
5556 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5557
5558 /*
5559 * Extensive logging.
5560 */
5561#if defined(LOG_ENABLED) && defined(IN_RING3)
5562 if (LogIs3Enabled())
5563 {
5564 PVM pVM = pVCpu->CTX_SUFF(pVM);
5565 char szRegs[4096];
5566 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5567 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5568 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5569 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5570 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5571 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5572 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5573 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5574 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5575 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5576 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5577 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5578 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5579 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5580 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5581 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5582 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5583 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5584 " efer=%016VR{efer}\n"
5585 " pat=%016VR{pat}\n"
5586 " sf_mask=%016VR{sf_mask}\n"
5587 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5588 " lstar=%016VR{lstar}\n"
5589 " star=%016VR{star} cstar=%016VR{cstar}\n"
5590 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5591 );
5592
5593 char szInstr[256];
5594 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5595 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5596 szInstr, sizeof(szInstr), NULL);
5597 Log3(("%s%s\n", szRegs, szInstr));
5598 }
5599#endif /* LOG_ENABLED */
5600
5601 /*
5602 * Call the mode specific worker function.
5603 */
5604 VBOXSTRICTRC rcStrict;
5605 if (!(pCtx->cr0 & X86_CR0_PE))
5606 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5607 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5608 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5609 else
5610 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5611
5612 /* Flush the prefetch buffer. */
5613#ifdef IEM_WITH_CODE_TLB
5614 pVCpu->iem.s.pbInstrBuf = NULL;
5615#else
5616 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5617#endif
5618
5619 /*
5620 * Unwind.
5621 */
5622 pVCpu->iem.s.cXcptRecursions--;
5623 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5624 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5625 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5626 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5627 return rcStrict;
5628}
5629
5630#ifdef IEM_WITH_SETJMP
5631/**
5632 * See iemRaiseXcptOrInt. Will not return.
5633 */
5634IEM_STATIC DECL_NO_RETURN(void)
5635iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5636 uint8_t cbInstr,
5637 uint8_t u8Vector,
5638 uint32_t fFlags,
5639 uint16_t uErr,
5640 uint64_t uCr2)
5641{
5642 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5643 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5644}
5645#endif
5646
5647
5648/** \#DE - 00. */
5649DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5650{
5651 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5652}
5653
5654
5655/** \#DB - 01.
5656 * @note This automatically clears DR7.GD. */
5657DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5658{
5659 /** @todo set/clear RF. */
5660 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5661 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5662}
5663
5664
5665/** \#BR - 05. */
5666DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5667{
5668 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5669}
5670
5671
5672/** \#UD - 06. */
5673DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5674{
5675 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5676}
5677
5678
5679/** \#NM - 07. */
5680DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5681{
5682 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5683}
5684
5685
5686/** \#TS(err) - 0a. */
5687DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5688{
5689 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5690}
5691
5692
5693/** \#TS(tr) - 0a. */
5694DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5695{
5696 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5697 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5698}
5699
5700
5701/** \#TS(0) - 0a. */
5702DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5703{
5704 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5705 0, 0);
5706}
5707
5708
5709/** \#TS(err) - 0a. */
5710DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5711{
5712 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5713 uSel & X86_SEL_MASK_OFF_RPL, 0);
5714}
5715
5716
5717/** \#NP(err) - 0b. */
5718DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5719{
5720 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5721}
5722
5723
5724/** \#NP(sel) - 0b. */
5725DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5726{
5727 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5728 uSel & ~X86_SEL_RPL, 0);
5729}
5730
5731
5732/** \#SS(seg) - 0c. */
5733DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5734{
5735 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5736 uSel & ~X86_SEL_RPL, 0);
5737}
5738
5739
5740/** \#SS(err) - 0c. */
5741DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5742{
5743 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5744}
5745
5746
5747/** \#GP(n) - 0d. */
5748DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5749{
5750 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5751}
5752
5753
5754/** \#GP(0) - 0d. */
5755DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5756{
5757 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5758}
5759
5760#ifdef IEM_WITH_SETJMP
5761/** \#GP(0) - 0d. */
5762DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5763{
5764 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5765}
5766#endif
5767
5768
5769/** \#GP(sel) - 0d. */
5770DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5771{
5772 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5773 Sel & ~X86_SEL_RPL, 0);
5774}
5775
5776
5777/** \#GP(0) - 0d. */
5778DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5779{
5780 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5781}
5782
5783
5784/** \#GP(sel) - 0d. */
5785DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5786{
5787 NOREF(iSegReg); NOREF(fAccess);
5788 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5789 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5790}
5791
5792#ifdef IEM_WITH_SETJMP
5793/** \#GP(sel) - 0d, longjmp. */
5794DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5795{
5796 NOREF(iSegReg); NOREF(fAccess);
5797 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5798 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5799}
5800#endif
5801
5802/** \#GP(sel) - 0d. */
5803DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5804{
5805 NOREF(Sel);
5806 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5807}
5808
5809#ifdef IEM_WITH_SETJMP
5810/** \#GP(sel) - 0d, longjmp. */
5811DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5812{
5813 NOREF(Sel);
5814 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5815}
5816#endif
5817
5818
5819/** \#GP(sel) - 0d. */
5820DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5821{
5822 NOREF(iSegReg); NOREF(fAccess);
5823 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5824}
5825
5826#ifdef IEM_WITH_SETJMP
5827/** \#GP(sel) - 0d, longjmp. */
5828DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5829 uint32_t fAccess)
5830{
5831 NOREF(iSegReg); NOREF(fAccess);
5832 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5833}
5834#endif
5835
5836
5837/** \#PF(n) - 0e. */
5838DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5839{
5840 uint16_t uErr;
5841 switch (rc)
5842 {
5843 case VERR_PAGE_NOT_PRESENT:
5844 case VERR_PAGE_TABLE_NOT_PRESENT:
5845 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5846 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5847 uErr = 0;
5848 break;
5849
5850 default:
5851 AssertMsgFailed(("%Rrc\n", rc));
5852 /* fall thru */
5853 case VERR_ACCESS_DENIED:
5854 uErr = X86_TRAP_PF_P;
5855 break;
5856
5857 /** @todo reserved */
5858 }
5859
5860 if (pVCpu->iem.s.uCpl == 3)
5861 uErr |= X86_TRAP_PF_US;
5862
5863 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5864 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5865 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5866 uErr |= X86_TRAP_PF_ID;
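 /* (I.e. the instruction-fetch (ID) bit is only reported for code accesses when
    NX is in effect: CR4.PAE and EFER.NXE both set, per the condition above.) */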
5867
5868#if 0 /* This is so much non-sense, really. Why was it done like that? */
5869 /* Note! RW access callers reporting a WRITE protection fault, will clear
5870 the READ flag before calling. So, read-modify-write accesses (RW)
5871 can safely be reported as READ faults. */
5872 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5873 uErr |= X86_TRAP_PF_RW;
5874#else
5875 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5876 {
5877 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5878 uErr |= X86_TRAP_PF_RW;
5879 }
5880#endif
5881
5882 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5883 uErr, GCPtrWhere);
5884}
5885
5886#ifdef IEM_WITH_SETJMP
5887/** \#PF(n) - 0e, longjmp. */
5888IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5889{
5890 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5891}
5892#endif
5893
5894
5895/** \#MF(0) - 10. */
5896DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5897{
5898 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5899}
5900
5901
5902/** \#AC(0) - 11. */
5903DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5904{
5905 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5906}
5907
5908
5909/**
5910 * Macro for calling iemCImplRaiseDivideError().
5911 *
5912 * This enables us to add/remove arguments and force different levels of
5913 * inlining as we wish.
5914 *
5915 * @return Strict VBox status code.
5916 */
5917#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5918IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5919{
5920 NOREF(cbInstr);
5921 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5922}
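/* Illustrative use (hypothetical handler, not taken from this file): an opcode
 * decoder that must raise #DE would simply do
 *     return IEMOP_RAISE_DIVIDE_ERROR();
 * which defers the work to iemCImplRaiseDivideError via IEM_MC_DEFER_TO_CIMPL_0. */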
5923
5924
5925/**
5926 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5927 *
5928 * This enables us to add/remove arguments and force different levels of
5929 * inlining as we wish.
5930 *
5931 * @return Strict VBox status code.
5932 */
5933#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5934IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5935{
5936 NOREF(cbInstr);
5937 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5938}
5939
5940
5941/**
5942 * Macro for calling iemCImplRaiseInvalidOpcode().
5943 *
5944 * This enables us to add/remove arguments and force different levels of
5945 * inlining as we wish.
5946 *
5947 * @return Strict VBox status code.
5948 */
5949#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5950IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5951{
5952 NOREF(cbInstr);
5953 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5954}
5955
5956
5957/** @} */
5958
5959
5960/*
5961 *
5962 * Helper routines.
5963 * Helper routines.
5964 * Helper routines.
5965 *
5966 */
5967
5968/**
5969 * Recalculates the effective operand size.
5970 *
5971 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5972 */
5973IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5974{
5975 switch (pVCpu->iem.s.enmCpuMode)
5976 {
5977 case IEMMODE_16BIT:
5978 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5979 break;
5980 case IEMMODE_32BIT:
5981 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5982 break;
5983 case IEMMODE_64BIT:
5984 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5985 {
5986 case 0:
5987 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5988 break;
5989 case IEM_OP_PRF_SIZE_OP:
5990 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5991 break;
5992 case IEM_OP_PRF_SIZE_REX_W:
5993 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5994 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5995 break;
5996 }
5997 break;
5998 default:
5999 AssertFailed();
6000 }
6001}
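/* Worked example (64-bit mode, per the switch above): no size prefixes yield the
 * default operand size; 0x66 alone yields 16-bit; REX.W yields 64-bit, also when
 * combined with 0x66, since REX.W takes precedence. */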
6002
6003
6004/**
6005 * Sets the default operand size to 64-bit and recalculates the effective
6006 * operand size.
6007 *
6008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6009 */
6010IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6011{
6012 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6013 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6014 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6015 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6016 else
6017 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6018}
6019
6020
6021/*
6022 *
6023 * Common opcode decoders.
6024 * Common opcode decoders.
6025 * Common opcode decoders.
6026 *
6027 */
6028//#include <iprt/mem.h>
6029
6030/**
6031 * Used to add extra details about a stub case.
6032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6033 */
6034IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6035{
6036#if defined(LOG_ENABLED) && defined(IN_RING3)
6037 PVM pVM = pVCpu->CTX_SUFF(pVM);
6038 char szRegs[4096];
6039 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6040 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6041 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6042 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6043 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6044 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6045 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6046 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6047 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6048 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6049 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6050 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6051 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6052 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6053 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6054 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6055 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6056 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6057 " efer=%016VR{efer}\n"
6058 " pat=%016VR{pat}\n"
6059 " sf_mask=%016VR{sf_mask}\n"
6060 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6061 " lstar=%016VR{lstar}\n"
6062 " star=%016VR{star} cstar=%016VR{cstar}\n"
6063 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6064 );
6065
6066 char szInstr[256];
6067 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6068 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6069 szInstr, sizeof(szInstr), NULL);
6070
6071 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6072#else
6073 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
6074#endif
6075}
6076
6077/**
6078 * Complains about a stub.
6079 *
6080 * Providing two versions of this macro, one for daily use and one for use when
6081 * working on IEM.
6082 */
6083#if 0
6084# define IEMOP_BITCH_ABOUT_STUB() \
6085 do { \
6086 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6087 iemOpStubMsg2(pVCpu); \
6088 RTAssertPanic(); \
6089 } while (0)
6090#else
6091# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6092#endif
6093
6094/** Stubs an opcode. */
6095#define FNIEMOP_STUB(a_Name) \
6096 FNIEMOP_DEF(a_Name) \
6097 { \
6098 RT_NOREF_PV(pVCpu); \
6099 IEMOP_BITCH_ABOUT_STUB(); \
6100 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6101 } \
6102 typedef int ignore_semicolon
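/* Typical (hypothetical) use: 'FNIEMOP_STUB(iemOp_SomeUnimplementedOpcode);' expands
 * to an opcode function that logs the stub hit and returns VERR_IEM_INSTR_NOT_IMPLEMENTED. */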
6103
6104/** Stubs an opcode. */
6105#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6106 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6107 { \
6108 RT_NOREF_PV(pVCpu); \
6109 RT_NOREF_PV(a_Name0); \
6110 IEMOP_BITCH_ABOUT_STUB(); \
6111 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6112 } \
6113 typedef int ignore_semicolon
6114
6115/** Stubs an opcode which currently should raise \#UD. */
6116#define FNIEMOP_UD_STUB(a_Name) \
6117 FNIEMOP_DEF(a_Name) \
6118 { \
6119 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6120 return IEMOP_RAISE_INVALID_OPCODE(); \
6121 } \
6122 typedef int ignore_semicolon
6123
6124/** Stubs an opcode which currently should raise \#UD. */
6125#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6126 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6127 { \
6128 RT_NOREF_PV(pVCpu); \
6129 RT_NOREF_PV(a_Name0); \
6130 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6131 return IEMOP_RAISE_INVALID_OPCODE(); \
6132 } \
6133 typedef int ignore_semicolon
6134
6135
6136
6137/** @name Register Access.
6138 * @{
6139 */
6140
6141/**
6142 * Gets a reference (pointer) to the specified hidden segment register.
6143 *
6144 * @returns Hidden register reference.
6145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6146 * @param iSegReg The segment register.
6147 */
6148IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6149{
6150 Assert(iSegReg < X86_SREG_COUNT);
6151 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6152 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6153
6154#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6155 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6156 { /* likely */ }
6157 else
6158 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6159#else
6160 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6161#endif
6162 return pSReg;
6163}
6164
6165
6166/**
6167 * Ensures that the given hidden segment register is up to date.
6168 *
6169 * @returns Hidden register reference.
6170 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6171 * @param pSReg The segment register.
6172 */
6173IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6174{
6175#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6176 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6177 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6178#else
6179 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6180 NOREF(pVCpu);
6181#endif
6182 return pSReg;
6183}
6184
6185
6186/**
6187 * Gets a reference (pointer) to the specified segment register (the selector
6188 * value).
6189 *
6190 * @returns Pointer to the selector variable.
6191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6192 * @param iSegReg The segment register.
6193 */
6194DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6195{
6196 Assert(iSegReg < X86_SREG_COUNT);
6197 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6198 return &pCtx->aSRegs[iSegReg].Sel;
6199}
6200
6201
6202/**
6203 * Fetches the selector value of a segment register.
6204 *
6205 * @returns The selector value.
6206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6207 * @param iSegReg The segment register.
6208 */
6209DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6210{
6211 Assert(iSegReg < X86_SREG_COUNT);
6212 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6213}
6214
6215
6216/**
6217 * Gets a reference (pointer) to the specified general purpose register.
6218 *
6219 * @returns Register reference.
6220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6221 * @param iReg The general purpose register.
6222 */
6223DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6224{
6225 Assert(iReg < 16);
6226 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6227 return &pCtx->aGRegs[iReg];
6228}
6229
6230
6231/**
6232 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6233 *
6234 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6235 *
6236 * @returns Register reference.
6237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6238 * @param iReg The register.
6239 */
6240DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6241{
6242 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6243 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6244 {
6245 Assert(iReg < 16);
6246 return &pCtx->aGRegs[iReg].u8;
6247 }
6248 /* high 8-bit register. */
6249 Assert(iReg < 8);
6250 return &pCtx->aGRegs[iReg & 3].bHi;
6251}
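
/*
 * For reference (illustrative, not part of the build): the legacy byte
 * register encoding handled above.  Without a REX prefix, encodings 4..7
 * select AH/CH/DH/BH, i.e. the high byte of GPRs 0..3, which is why the
 * index is masked with 3 and the bHi field is returned.  With any REX
 * prefix they select SPL/BPL/SIL/DIL instead.  These name tables are only
 * a reading aid and exist nowhere else in this file.
 */
#if 0
static const char * const g_apszGReg8NoRex[8] =
{ "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh" };
static const char * const g_apszGReg8WithRex[16] =
{ "al",  "cl",  "dl",   "bl",   "spl",  "bpl",  "sil",  "dil",
  "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b" };
#endif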
6252
6253
6254/**
6255 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6256 *
6257 * @returns Register reference.
6258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6259 * @param iReg The register.
6260 */
6261DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6262{
6263 Assert(iReg < 16);
6264 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6265 return &pCtx->aGRegs[iReg].u16;
6266}
6267
6268
6269/**
6270 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6271 *
6272 * @returns Register reference.
6273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6274 * @param iReg The register.
6275 */
6276DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6277{
6278 Assert(iReg < 16);
6279 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6280 return &pCtx->aGRegs[iReg].u32;
6281}
6282
6283
6284/**
6285 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6286 *
6287 * @returns Register reference.
6288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6289 * @param iReg The register.
6290 */
6291DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6292{
6293    Assert(iReg < 16);
6294 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6295 return &pCtx->aGRegs[iReg].u64;
6296}
6297
6298
6299/**
6300 * Fetches the value of an 8-bit general purpose register.
6301 *
6302 * @returns The register value.
6303 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6304 * @param iReg The register.
6305 */
6306DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6307{
6308 return *iemGRegRefU8(pVCpu, iReg);
6309}
6310
6311
6312/**
6313 * Fetches the value of a 16-bit general purpose register.
6314 *
6315 * @returns The register value.
6316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6317 * @param iReg The register.
6318 */
6319DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6320{
6321 Assert(iReg < 16);
6322 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6323}
6324
6325
6326/**
6327 * Fetches the value of a 32-bit general purpose register.
6328 *
6329 * @returns The register value.
6330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6331 * @param iReg The register.
6332 */
6333DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6334{
6335 Assert(iReg < 16);
6336 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6337}
6338
6339
6340/**
6341 * Fetches the value of a 64-bit general purpose register.
6342 *
6343 * @returns The register value.
6344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6345 * @param iReg The register.
6346 */
6347DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6348{
6349 Assert(iReg < 16);
6350 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6351}
6352
6353
6354/**
6355 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6356 *
6357 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6358 * segment limit.
6359 *
6360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6361 * @param offNextInstr The offset of the next instruction.
6362 */
6363IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6364{
6365 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6366 switch (pVCpu->iem.s.enmEffOpSize)
6367 {
6368 case IEMMODE_16BIT:
6369 {
6370 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6371 if ( uNewIp > pCtx->cs.u32Limit
6372 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6373 return iemRaiseGeneralProtectionFault0(pVCpu);
6374 pCtx->rip = uNewIp;
6375 break;
6376 }
6377
6378 case IEMMODE_32BIT:
6379 {
6380 Assert(pCtx->rip <= UINT32_MAX);
6381 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6382
6383 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6384 if (uNewEip > pCtx->cs.u32Limit)
6385 return iemRaiseGeneralProtectionFault0(pVCpu);
6386 pCtx->rip = uNewEip;
6387 break;
6388 }
6389
6390 case IEMMODE_64BIT:
6391 {
6392 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6393
6394 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6395 if (!IEM_IS_CANONICAL(uNewRip))
6396 return iemRaiseGeneralProtectionFault0(pVCpu);
6397 pCtx->rip = uNewRip;
6398 break;
6399 }
6400
6401 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6402 }
6403
6404 pCtx->eflags.Bits.u1RF = 0;
6405
6406#ifndef IEM_WITH_CODE_TLB
6407 /* Flush the prefetch buffer. */
6408 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6409#endif
6410
6411 return VINF_SUCCESS;
6412}
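
/*
 * Worked example (illustrative, not part of the build): in 16-bit code the
 * target is computed with wrapping uint16_t arithmetic before the CS limit
 * check above.  A 2-byte JMP rel8 at IP=0xfffe with offset +4 therefore
 * wraps to 0x0004, while offset -4 gives 0xfffc.
 */
#if 0
static uint16_t iemExampleRel8Target16(uint16_t uIp, uint8_t cbInstr, int8_t offRel)
{
    return (uint16_t)(uIp + cbInstr + offRel); /* wraps modulo 64K like the real IP */
}
#endif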
6413
6414
6415/**
6416 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6417 *
6418 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6419 * segment limit.
6420 *
6421 * @returns Strict VBox status code.
6422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6423 * @param offNextInstr The offset of the next instruction.
6424 */
6425IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6426{
6427 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6428 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6429
6430 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6431 if ( uNewIp > pCtx->cs.u32Limit
6432 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6433 return iemRaiseGeneralProtectionFault0(pVCpu);
6434 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6435 pCtx->rip = uNewIp;
6436 pCtx->eflags.Bits.u1RF = 0;
6437
6438#ifndef IEM_WITH_CODE_TLB
6439 /* Flush the prefetch buffer. */
6440 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6441#endif
6442
6443 return VINF_SUCCESS;
6444}
6445
6446
6447/**
6448 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6449 *
6450 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6451 * segment limit.
6452 *
6453 * @returns Strict VBox status code.
6454 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6455 * @param offNextInstr The offset of the next instruction.
6456 */
6457IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6458{
6459 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6460 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6461
6462 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6463 {
6464 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6465
6466 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6467 if (uNewEip > pCtx->cs.u32Limit)
6468 return iemRaiseGeneralProtectionFault0(pVCpu);
6469 pCtx->rip = uNewEip;
6470 }
6471 else
6472 {
6473 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6474
6475 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6476 if (!IEM_IS_CANONICAL(uNewRip))
6477 return iemRaiseGeneralProtectionFault0(pVCpu);
6478 pCtx->rip = uNewRip;
6479 }
6480 pCtx->eflags.Bits.u1RF = 0;
6481
6482#ifndef IEM_WITH_CODE_TLB
6483 /* Flush the prefetch buffer. */
6484 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6485#endif
6486
6487 return VINF_SUCCESS;
6488}
6489
6490
6491/**
6492 * Performs a near jump to the specified address.
6493 *
6494 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6495 * segment limit.
6496 *
6497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6498 * @param uNewRip The new RIP value.
6499 */
6500IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6501{
6502 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6503 switch (pVCpu->iem.s.enmEffOpSize)
6504 {
6505 case IEMMODE_16BIT:
6506 {
6507 Assert(uNewRip <= UINT16_MAX);
6508 if ( uNewRip > pCtx->cs.u32Limit
6509 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6510 return iemRaiseGeneralProtectionFault0(pVCpu);
6511 /** @todo Test 16-bit jump in 64-bit mode. */
6512 pCtx->rip = uNewRip;
6513 break;
6514 }
6515
6516 case IEMMODE_32BIT:
6517 {
6518 Assert(uNewRip <= UINT32_MAX);
6519 Assert(pCtx->rip <= UINT32_MAX);
6520 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6521
6522 if (uNewRip > pCtx->cs.u32Limit)
6523 return iemRaiseGeneralProtectionFault0(pVCpu);
6524 pCtx->rip = uNewRip;
6525 break;
6526 }
6527
6528 case IEMMODE_64BIT:
6529 {
6530 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6531
6532 if (!IEM_IS_CANONICAL(uNewRip))
6533 return iemRaiseGeneralProtectionFault0(pVCpu);
6534 pCtx->rip = uNewRip;
6535 break;
6536 }
6537
6538 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6539 }
6540
6541 pCtx->eflags.Bits.u1RF = 0;
6542
6543#ifndef IEM_WITH_CODE_TLB
6544 /* Flush the prefetch buffer. */
6545 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6546#endif
6547
6548 return VINF_SUCCESS;
6549}
6550
6551
6552/**
6553 * Get the address of the top of the stack.
6554 *
6555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6556 * @param pCtx The CPU context from which SP/ESP/RSP should be
6557 * read.
6558 */
6559DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6560{
6561 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6562 return pCtx->rsp;
6563 if (pCtx->ss.Attr.n.u1DefBig)
6564 return pCtx->esp;
6565 return pCtx->sp;
6566}
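
/*
 * Minimal sketch (illustrative, not part of the build) of the stack width
 * rule implemented above: 64-bit mode always uses the full RSP, otherwise
 * SS.ATTR.D selects between ESP and SP.  The parameters are stand-ins for
 * the CPU mode and SS.ATTR.D checks, not real fields.
 */
#if 0
static uint64_t iemExampleEffRsp(bool fLongMode, bool fSsDefBig, uint64_t uRsp)
{
    if (fLongMode)
        return uRsp;                  /* 64-bit mode: SS.ATTR.D is ignored. */
    if (fSsDefBig)
        return uRsp & UINT32_MAX;     /* 32-bit stack segment: ESP. */
    return uRsp & UINT16_MAX;         /* 16-bit stack segment: SP. */
}
#endif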
6567
6568
6569/**
6570 * Updates the RIP/EIP/IP to point to the next instruction.
6571 *
6572 * This function leaves the EFLAGS.RF flag alone.
6573 *
6574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6575 * @param cbInstr The number of bytes to add.
6576 */
6577IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6578{
6579 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6580 switch (pVCpu->iem.s.enmCpuMode)
6581 {
6582 case IEMMODE_16BIT:
6583 Assert(pCtx->rip <= UINT16_MAX);
6584 pCtx->eip += cbInstr;
6585 pCtx->eip &= UINT32_C(0xffff);
6586 break;
6587
6588 case IEMMODE_32BIT:
6589 pCtx->eip += cbInstr;
6590 Assert(pCtx->rip <= UINT32_MAX);
6591 break;
6592
6593 case IEMMODE_64BIT:
6594 pCtx->rip += cbInstr;
6595 break;
6596 default: AssertFailed();
6597 }
6598}
6599
6600
6601#if 0
6602/**
6603 * Updates the RIP/EIP/IP to point to the next instruction.
6604 *
6605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6606 */
6607IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6608{
6609 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6610}
6611#endif
6612
6613
6614
6615/**
6616 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6617 *
6618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6619 * @param cbInstr The number of bytes to add.
6620 */
6621IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6622{
6623 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6624
6625 pCtx->eflags.Bits.u1RF = 0;
6626
6627 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6628#if ARCH_BITS >= 64
6629 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6630 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6631 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6632#else
6633 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6634 pCtx->rip += cbInstr;
6635 else
6636 {
6637 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6638 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6639 }
6640#endif
6641}
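
/*
 * Worked example (illustrative): the mask table above makes the advance wrap
 * at the width of the current mode, so in 16-bit mode (index 0) an IP of
 * 0xffff advanced by a 3 byte instruction becomes (0xffff + 3) & 0xffff = 2.
 */
#if 0
AssertCompile(((UINT64_C(0xffff) + 3) & UINT64_C(0xffff)) == 2);
#endif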
6642
6643
6644/**
6645 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6646 *
6647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6648 */
6649IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6650{
6651 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6652}
6653
6654
6655/**
6656 * Adds to the stack pointer.
6657 *
6658 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6659 * @param pCtx The CPU context in which SP/ESP/RSP should be
6660 * updated.
6661 * @param cbToAdd The number of bytes to add (8-bit!).
6662 */
6663DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6664{
6665 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6666 pCtx->rsp += cbToAdd;
6667 else if (pCtx->ss.Attr.n.u1DefBig)
6668 pCtx->esp += cbToAdd;
6669 else
6670 pCtx->sp += cbToAdd;
6671}
6672
6673
6674/**
6675 * Subtracts from the stack pointer.
6676 *
6677 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6678 * @param pCtx The CPU context in which SP/ESP/RSP should be
6679 * updated.
6680 * @param cbToSub The number of bytes to subtract (8-bit!).
6681 */
6682DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6683{
6684 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6685 pCtx->rsp -= cbToSub;
6686 else if (pCtx->ss.Attr.n.u1DefBig)
6687 pCtx->esp -= cbToSub;
6688 else
6689 pCtx->sp -= cbToSub;
6690}
6691
6692
6693/**
6694 * Adds to the temporary stack pointer.
6695 *
6696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6697 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6698 * @param cbToAdd The number of bytes to add (16-bit).
6699 * @param pCtx Where to get the current stack mode.
6700 */
6701DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6702{
6703 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6704 pTmpRsp->u += cbToAdd;
6705 else if (pCtx->ss.Attr.n.u1DefBig)
6706 pTmpRsp->DWords.dw0 += cbToAdd;
6707 else
6708 pTmpRsp->Words.w0 += cbToAdd;
6709}
6710
6711
6712/**
6713 * Subtracts from the temporary stack pointer.
6714 *
6715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6716 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6717 * @param cbToSub The number of bytes to subtract.
6718 * @param pCtx Where to get the current stack mode.
6719 * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter is
6720 * expecting that.
6721 */
6722DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6723{
6724 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6725 pTmpRsp->u -= cbToSub;
6726 else if (pCtx->ss.Attr.n.u1DefBig)
6727 pTmpRsp->DWords.dw0 -= cbToSub;
6728 else
6729 pTmpRsp->Words.w0 -= cbToSub;
6730}
6731
6732
6733/**
6734 * Calculates the effective stack address for a push of the specified size as
6735 * well as the new RSP value (upper bits may be masked).
6736 *
6737 * @returns Effective stack address for the push.
6738 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6739 * @param pCtx Where to get the current stack mode.
6740 * @param cbItem The size of the stack item to push.
6741 * @param puNewRsp Where to return the new RSP value.
6742 */
6743DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6744{
6745 RTUINT64U uTmpRsp;
6746 RTGCPTR GCPtrTop;
6747 uTmpRsp.u = pCtx->rsp;
6748
6749 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6750 GCPtrTop = uTmpRsp.u -= cbItem;
6751 else if (pCtx->ss.Attr.n.u1DefBig)
6752 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6753 else
6754 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6755 *puNewRsp = uTmpRsp.u;
6756 return GCPtrTop;
6757}
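
/*
 * Worked example (illustrative, not part of the build): a dword push on a
 * 16-bit stack segment with RSP=0x00120002.  Only the low word is touched,
 * so SP wraps to 0xfffe while the upper RSP bits are preserved, exactly as
 * in iemRegGetRspForPush above.
 */
#if 0
static void iemExamplePush16BitStack(void)
{
    RTUINT64U uTmpRsp;
    uTmpRsp.u = UINT64_C(0x00120002);   /* SS.ATTR.D=0, SP=0x0002 */
    uTmpRsp.Words.w0 -= 4;              /* push a dword */
    Assert(uTmpRsp.Words.w0 == UINT16_C(0xfffe));
    Assert(uTmpRsp.u == UINT64_C(0x0012fffe));
}
#endif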
6758
6759
6760/**
6761 * Gets the current stack pointer and calculates the value after a pop of the
6762 * specified size.
6763 *
6764 * @returns Current stack pointer.
6765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6766 * @param pCtx Where to get the current stack mode.
6767 * @param cbItem The size of the stack item to pop.
6768 * @param puNewRsp Where to return the new RSP value.
6769 */
6770DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6771{
6772 RTUINT64U uTmpRsp;
6773 RTGCPTR GCPtrTop;
6774 uTmpRsp.u = pCtx->rsp;
6775
6776 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6777 {
6778 GCPtrTop = uTmpRsp.u;
6779 uTmpRsp.u += cbItem;
6780 }
6781 else if (pCtx->ss.Attr.n.u1DefBig)
6782 {
6783 GCPtrTop = uTmpRsp.DWords.dw0;
6784 uTmpRsp.DWords.dw0 += cbItem;
6785 }
6786 else
6787 {
6788 GCPtrTop = uTmpRsp.Words.w0;
6789 uTmpRsp.Words.w0 += cbItem;
6790 }
6791 *puNewRsp = uTmpRsp.u;
6792 return GCPtrTop;
6793}
6794
6795
6796/**
6797 * Calculates the effective stack address for a push of the specified size as
6798 * well as the new temporary RSP value (upper bits may be masked).
6799 *
6800 * @returns Effective stack address for the push.
6801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6802 * @param pCtx Where to get the current stack mode.
6803 * @param pTmpRsp The temporary stack pointer. This is updated.
6804 * @param cbItem The size of the stack item to push.
6805 */
6806DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6807{
6808 RTGCPTR GCPtrTop;
6809
6810 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6811 GCPtrTop = pTmpRsp->u -= cbItem;
6812 else if (pCtx->ss.Attr.n.u1DefBig)
6813 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6814 else
6815 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6816 return GCPtrTop;
6817}
6818
6819
6820/**
6821 * Gets the effective stack address for a pop of the specified size and
6822 * calculates and updates the temporary RSP.
6823 *
6824 * @returns Current stack pointer.
6825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6826 * @param pCtx Where to get the current stack mode.
6827 * @param pTmpRsp The temporary stack pointer. This is updated.
6828 * @param cbItem The size of the stack item to pop.
6829 */
6830DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6831{
6832 RTGCPTR GCPtrTop;
6833 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6834 {
6835 GCPtrTop = pTmpRsp->u;
6836 pTmpRsp->u += cbItem;
6837 }
6838 else if (pCtx->ss.Attr.n.u1DefBig)
6839 {
6840 GCPtrTop = pTmpRsp->DWords.dw0;
6841 pTmpRsp->DWords.dw0 += cbItem;
6842 }
6843 else
6844 {
6845 GCPtrTop = pTmpRsp->Words.w0;
6846 pTmpRsp->Words.w0 += cbItem;
6847 }
6848 return GCPtrTop;
6849}
6850
6851/** @} */
6852
6853
6854/** @name FPU access and helpers.
6855 *
6856 * @{
6857 */
6858
6859
6860/**
6861 * Hook for preparing to use the host FPU.
6862 *
6863 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6864 *
6865 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6866 */
6867DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6868{
6869#ifdef IN_RING3
6870 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6871#else
6872 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6873#endif
6874}
6875
6876
6877/**
6878 * Hook for preparing to use the host FPU for SSE.
6879 *
6880 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6881 *
6882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6883 */
6884DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6885{
6886 iemFpuPrepareUsage(pVCpu);
6887}
6888
6889
6890/**
6891 * Hook for preparing to use the host FPU for AVX.
6892 *
6893 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6894 *
6895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6896 */
6897DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6898{
6899 iemFpuPrepareUsage(pVCpu);
6900}
6901
6902
6903/**
6904 * Hook for actualizing the guest FPU state before the interpreter reads it.
6905 *
6906 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6907 *
6908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6909 */
6910DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6911{
6912#ifdef IN_RING3
6913 NOREF(pVCpu);
6914#else
6915 CPUMRZFpuStateActualizeForRead(pVCpu);
6916#endif
6917}
6918
6919
6920/**
6921 * Hook for actualizing the guest FPU state before the interpreter changes it.
6922 *
6923 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6924 *
6925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6926 */
6927DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6928{
6929#ifdef IN_RING3
6930 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6931#else
6932 CPUMRZFpuStateActualizeForChange(pVCpu);
6933#endif
6934}
6935
6936
6937/**
6938 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6939 * only.
6940 *
6941 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6942 *
6943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6944 */
6945DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6946{
6947#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6948 NOREF(pVCpu);
6949#else
6950 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6951#endif
6952}
6953
6954
6955/**
6956 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6957 * read+write.
6958 *
6959 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6960 *
6961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6962 */
6963DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6964{
6965#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6966 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6967#else
6968 CPUMRZFpuStateActualizeForChange(pVCpu);
6969#endif
6970}
6971
6972
6973/**
6974 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6975 * only.
6976 *
6977 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6978 *
6979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6980 */
6981DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6982{
6983#ifdef IN_RING3
6984 NOREF(pVCpu);
6985#else
6986 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6987#endif
6988}
6989
6990
6991/**
6992 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6993 * read+write.
6994 *
6995 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6996 *
6997 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6998 */
6999DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7000{
7001#ifdef IN_RING3
7002 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7003#else
7004 CPUMRZFpuStateActualizeForChange(pVCpu);
7005#endif
7006}
7007
7008
7009/**
7010 * Stores a QNaN value into a FPU register.
7011 *
7012 * @param pReg Pointer to the register.
7013 */
7014DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7015{
7016 pReg->au32[0] = UINT32_C(0x00000000);
7017 pReg->au32[1] = UINT32_C(0xc0000000);
7018 pReg->au16[4] = UINT16_C(0xffff);
7019}
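
/*
 * For reference (illustrative, not part of the build): the bits stored above
 * form the x87 "real indefinite" QNaN - sign 1, exponent 0x7fff, mantissa
 * 0xc000000000000000 - which is the value the FPU itself produces for a
 * masked invalid operation.
 */
#if 0
static void iemExampleQNanBits(void)
{
    RTFLOAT80U r80;
    iemFpuStoreQNan(&r80);
    Assert(r80.s.uExponent   == 0x7fff);
    Assert(r80.s.u64Mantissa == UINT64_C(0xc000000000000000));
    Assert(r80.au16[4]       == UINT16_C(0xffff)); /* sign + exponent */
}
#endif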
7020
7021
7022/**
7023 * Updates the FOP, FPU.CS and FPUIP registers.
7024 *
7025 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7026 * @param pCtx The CPU context.
7027 * @param pFpuCtx The FPU context.
7028 */
7029DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
7030{
7031 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7032 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7033    /** @todo x87.CS and FPUIP need to be kept separately. */
7034 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7035 {
7036 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7037 * happens in real mode here based on the fnsave and fnstenv images. */
7038 pFpuCtx->CS = 0;
7039 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
7040 }
7041 else
7042 {
7043 pFpuCtx->CS = pCtx->cs.Sel;
7044 pFpuCtx->FPUIP = pCtx->rip;
7045 }
7046}
7047
7048
7049/**
7050 * Updates the x87.DS and FPUDP registers.
7051 *
7052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7053 * @param pCtx The CPU context.
7054 * @param pFpuCtx The FPU context.
7055 * @param iEffSeg The effective segment register.
7056 * @param GCPtrEff The effective address relative to @a iEffSeg.
7057 */
7058DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7059{
7060 RTSEL sel;
7061 switch (iEffSeg)
7062 {
7063 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
7064 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
7065 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
7066 case X86_SREG_ES: sel = pCtx->es.Sel; break;
7067 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
7068 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
7069 default:
7070 AssertMsgFailed(("%d\n", iEffSeg));
7071 sel = pCtx->ds.Sel;
7072 }
7073    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7074 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7075 {
7076 pFpuCtx->DS = 0;
7077 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7078 }
7079 else
7080 {
7081 pFpuCtx->DS = sel;
7082 pFpuCtx->FPUDP = GCPtrEff;
7083 }
7084}
7085
7086
7087/**
7088 * Rotates the stack registers in the push direction.
7089 *
7090 * @param pFpuCtx The FPU context.
7091 * @remarks This is a complete waste of time, but fxsave stores the registers in
7092 * stack order.
7093 */
7094DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7095{
7096 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7097 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7098 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7099 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7100 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7101 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7102 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7103 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7104 pFpuCtx->aRegs[0].r80 = r80Tmp;
7105}
7106
7107
7108/**
7109 * Rotates the stack registers in the pop direction.
7110 *
7111 * @param pFpuCtx The FPU context.
7112 * @remarks This is a complete waste of time, but fxsave stores the registers in
7113 * stack order.
7114 */
7115DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7116{
7117 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7118 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7119 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7120 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7121 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7122 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7123 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7124 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7125 pFpuCtx->aRegs[7].r80 = r80Tmp;
7126}
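
/*
 * Note (illustrative): IEM keeps aRegs[] in ST() order, i.e. aRegs[i] holds
 * ST(i) relative to the current TOP, matching the fxsave image layout.  Any
 * change of TOP therefore has to be paired with one of the rotations above;
 * the physical register behind an ST() index is the expression used all over
 * this file, sketched below (not part of the build).
 */
#if 0
static unsigned iemExamplePhysRegFromSt(uint16_t fFsw, unsigned iStReg)
{
    return (X86_FSW_TOP_GET(fFsw) + iStReg) & X86_FSW_TOP_SMASK;
}
#endif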
7127
7128
7129/**
7130 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7131 * exception prevents it.
7132 *
7133 * @param pResult The FPU operation result to push.
7134 * @param pFpuCtx The FPU context.
7135 */
7136IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7137{
7138 /* Update FSW and bail if there are pending exceptions afterwards. */
7139 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7140 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7141 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7142 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7143 {
7144 pFpuCtx->FSW = fFsw;
7145 return;
7146 }
7147
7148 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7149 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7150 {
7151 /* All is fine, push the actual value. */
7152 pFpuCtx->FTW |= RT_BIT(iNewTop);
7153 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7154 }
7155 else if (pFpuCtx->FCW & X86_FCW_IM)
7156 {
7157 /* Masked stack overflow, push QNaN. */
7158 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7159 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7160 }
7161 else
7162 {
7163 /* Raise stack overflow, don't push anything. */
7164 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7165 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7166 return;
7167 }
7168
7169 fFsw &= ~X86_FSW_TOP_MASK;
7170 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7171 pFpuCtx->FSW = fFsw;
7172
7173 iemFpuRotateStackPush(pFpuCtx);
7174}
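
/*
 * Worked example (illustrative): the (TOP + 7) & 7 expression above is TOP-1
 * modulo 8, i.e. the slot a push lands in.  If the FTW bit for that slot is
 * already set, the push would overwrite a live register and the stack
 * overflow path is taken instead.
 */
#if 0
AssertCompile(((0 + 7) & X86_FSW_TOP_SMASK) == 7); /* push from TOP=0 lands in reg 7 */
AssertCompile(((3 + 7) & X86_FSW_TOP_SMASK) == 2); /* push from TOP=3 lands in reg 2 */
#endif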
7175
7176
7177/**
7178 * Stores a result in a FPU register and updates the FSW and FTW.
7179 *
7180 * @param pFpuCtx The FPU context.
7181 * @param pResult The result to store.
7182 * @param iStReg Which FPU register to store it in.
7183 */
7184IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7185{
7186 Assert(iStReg < 8);
7187 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7188 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7189 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7190 pFpuCtx->FTW |= RT_BIT(iReg);
7191 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7192}
7193
7194
7195/**
7196 * Only updates the FPU status word (FSW) with the result of the current
7197 * instruction.
7198 *
7199 * @param pFpuCtx The FPU context.
7200 * @param u16FSW The FSW output of the current instruction.
7201 */
7202IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7203{
7204 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7205 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7206}
7207
7208
7209/**
7210 * Pops one item off the FPU stack if no pending exception prevents it.
7211 *
7212 * @param pFpuCtx The FPU context.
7213 */
7214IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7215{
7216 /* Check pending exceptions. */
7217 uint16_t uFSW = pFpuCtx->FSW;
7218 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7219 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7220 return;
7221
7222    /* TOP++ (pop): the +9 below is +1 modulo 8 once masked. */
7223 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7224 uFSW &= ~X86_FSW_TOP_MASK;
7225 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7226 pFpuCtx->FSW = uFSW;
7227
7228 /* Mark the previous ST0 as empty. */
7229 iOldTop >>= X86_FSW_TOP_SHIFT;
7230 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7231
7232 /* Rotate the registers. */
7233 iemFpuRotateStackPop(pFpuCtx);
7234}
7235
7236
7237/**
7238 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7239 *
7240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7241 * @param pResult The FPU operation result to push.
7242 */
7243IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7244{
7245 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7246 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7247 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7248 iemFpuMaybePushResult(pResult, pFpuCtx);
7249}
7250
7251
7252/**
7253 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7254 * and sets FPUDP and FPUDS.
7255 *
7256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7257 * @param pResult The FPU operation result to push.
7258 * @param iEffSeg The effective segment register.
7259 * @param GCPtrEff The effective address relative to @a iEffSeg.
7260 */
7261IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7262{
7263 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7264 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7265 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7266 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7267 iemFpuMaybePushResult(pResult, pFpuCtx);
7268}
7269
7270
7271/**
7272 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7273 * unless a pending exception prevents it.
7274 *
7275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7276 * @param pResult The FPU operation result to store and push.
7277 */
7278IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7279{
7280 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7281 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7282 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7283
7284 /* Update FSW and bail if there are pending exceptions afterwards. */
7285 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7286 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7287 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7288 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7289 {
7290 pFpuCtx->FSW = fFsw;
7291 return;
7292 }
7293
7294 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7295 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7296 {
7297 /* All is fine, push the actual value. */
7298 pFpuCtx->FTW |= RT_BIT(iNewTop);
7299 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7300 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7301 }
7302 else if (pFpuCtx->FCW & X86_FCW_IM)
7303 {
7304 /* Masked stack overflow, push QNaN. */
7305 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7306 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7307 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7308 }
7309 else
7310 {
7311 /* Raise stack overflow, don't push anything. */
7312 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7313 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7314 return;
7315 }
7316
7317 fFsw &= ~X86_FSW_TOP_MASK;
7318 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7319 pFpuCtx->FSW = fFsw;
7320
7321 iemFpuRotateStackPush(pFpuCtx);
7322}
7323
7324
7325/**
7326 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7327 * FOP.
7328 *
7329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7330 * @param pResult The result to store.
7331 * @param iStReg Which FPU register to store it in.
7332 */
7333IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7334{
7335 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7336 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7337 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7338 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7339}
7340
7341
7342/**
7343 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7344 * FOP, and then pops the stack.
7345 *
7346 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7347 * @param pResult The result to store.
7348 * @param iStReg Which FPU register to store it in.
7349 */
7350IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7351{
7352 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7353 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7354 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7355 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7356 iemFpuMaybePopOne(pFpuCtx);
7357}
7358
7359
7360/**
7361 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7362 * FPUDP, and FPUDS.
7363 *
7364 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7365 * @param pResult The result to store.
7366 * @param iStReg Which FPU register to store it in.
7367 * @param iEffSeg The effective memory operand selector register.
7368 * @param GCPtrEff The effective memory operand offset.
7369 */
7370IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7371 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7372{
7373 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7374 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7375 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7376 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7377 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7378}
7379
7380
7381/**
7382 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7383 * FPUDP, and FPUDS, and then pops the stack.
7384 *
7385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7386 * @param pResult The result to store.
7387 * @param iStReg Which FPU register to store it in.
7388 * @param iEffSeg The effective memory operand selector register.
7389 * @param GCPtrEff The effective memory operand offset.
7390 */
7391IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7392 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7393{
7394 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7395 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7396 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7397 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7398 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7399 iemFpuMaybePopOne(pFpuCtx);
7400}
7401
7402
7403/**
7404 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7405 *
7406 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7407 */
7408IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7409{
7410 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7411 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7412 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7413}
7414
7415
7416/**
7417 * Marks the specified stack register as free (for FFREE).
7418 *
7419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7420 * @param iStReg The register to free.
7421 */
7422IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7423{
7424 Assert(iStReg < 8);
7425 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7426 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7427 pFpuCtx->FTW &= ~RT_BIT(iReg);
7428}
7429
7430
7431/**
7432 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7433 *
7434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7435 */
7436IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7437{
7438 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7439 uint16_t uFsw = pFpuCtx->FSW;
7440 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7441 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7442 uFsw &= ~X86_FSW_TOP_MASK;
7443 uFsw |= uTop;
7444 pFpuCtx->FSW = uFsw;
7445}
7446
7447
7448/**
7449 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7450 *
7451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7452 */
7453IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7454{
7455 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7456 uint16_t uFsw = pFpuCtx->FSW;
7457 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7458 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7459 uFsw &= ~X86_FSW_TOP_MASK;
7460 uFsw |= uTop;
7461 pFpuCtx->FSW = uFsw;
7462}
7463
7464
7465/**
7466 * Updates the FSW, FOP, FPUIP, and FPUCS.
7467 *
7468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7469 * @param u16FSW The FSW from the current instruction.
7470 */
7471IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7472{
7473 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7474 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7475 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7476 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7477}
7478
7479
7480/**
7481 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7482 *
7483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7484 * @param u16FSW The FSW from the current instruction.
7485 */
7486IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7487{
7488 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7489 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7490 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7491 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7492 iemFpuMaybePopOne(pFpuCtx);
7493}
7494
7495
7496/**
7497 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7498 *
7499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7500 * @param u16FSW The FSW from the current instruction.
7501 * @param iEffSeg The effective memory operand selector register.
7502 * @param GCPtrEff The effective memory operand offset.
7503 */
7504IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7505{
7506 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7507 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7508 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7509 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7510 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7511}
7512
7513
7514/**
7515 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7516 *
7517 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7518 * @param u16FSW The FSW from the current instruction.
7519 */
7520IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7521{
7522 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7523 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7524 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7525 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7526 iemFpuMaybePopOne(pFpuCtx);
7527 iemFpuMaybePopOne(pFpuCtx);
7528}
7529
7530
7531/**
7532 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7533 *
7534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7535 * @param u16FSW The FSW from the current instruction.
7536 * @param iEffSeg The effective memory operand selector register.
7537 * @param GCPtrEff The effective memory operand offset.
7538 */
7539IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7540{
7541 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7542 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7543 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7544 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7545 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7546 iemFpuMaybePopOne(pFpuCtx);
7547}
7548
7549
7550/**
7551 * Worker routine for raising an FPU stack underflow exception.
7552 *
7553 * @param pFpuCtx The FPU context.
7554 * @param iStReg The stack register being accessed.
7555 */
7556IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7557{
7558 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7559 if (pFpuCtx->FCW & X86_FCW_IM)
7560 {
7561 /* Masked underflow. */
7562 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7563 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7564 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7565 if (iStReg != UINT8_MAX)
7566 {
7567 pFpuCtx->FTW |= RT_BIT(iReg);
7568 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7569 }
7570 }
7571 else
7572 {
7573 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7574 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7575 }
7576}
7577
7578
7579/**
7580 * Raises a FPU stack underflow exception.
7581 *
7582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7583 * @param iStReg The destination register that should be loaded
7584 * with QNaN if \#IS is masked. Specify
7585 * UINT8_MAX if none (like for fcom).
7586 */
7587DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7588{
7589 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7590 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7591 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7592 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7593}
7594
7595
7596DECL_NO_INLINE(IEM_STATIC, void)
7597iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7598{
7599 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7600 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7601 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7602 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7603 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7604}
7605
7606
7607DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7608{
7609 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7610 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7611 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7612 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7613 iemFpuMaybePopOne(pFpuCtx);
7614}
7615
7616
7617DECL_NO_INLINE(IEM_STATIC, void)
7618iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7619{
7620 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7621 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7622 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7623 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7624 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7625 iemFpuMaybePopOne(pFpuCtx);
7626}
7627
7628
7629DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7630{
7631 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7632 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7633 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7634 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7635 iemFpuMaybePopOne(pFpuCtx);
7636 iemFpuMaybePopOne(pFpuCtx);
7637}
7638
7639
7640DECL_NO_INLINE(IEM_STATIC, void)
7641iemFpuStackPushUnderflow(PVMCPU pVCpu)
7642{
7643 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7644 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7645 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7646
7647 if (pFpuCtx->FCW & X86_FCW_IM)
7648 {
7649        /* Masked stack underflow - push QNaN. */
7650 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7651 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7652 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7653 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7654 pFpuCtx->FTW |= RT_BIT(iNewTop);
7655 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7656 iemFpuRotateStackPush(pFpuCtx);
7657 }
7658 else
7659 {
7660 /* Exception pending - don't change TOP or the register stack. */
7661 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7662 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7663 }
7664}
7665
7666
7667DECL_NO_INLINE(IEM_STATIC, void)
7668iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7669{
7670 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7671 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7672 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7673
7674 if (pFpuCtx->FCW & X86_FCW_IM)
7675 {
7676        /* Masked stack underflow - push QNaN. */
7677 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7678 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7679 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7680 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7681 pFpuCtx->FTW |= RT_BIT(iNewTop);
7682 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7683 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7684 iemFpuRotateStackPush(pFpuCtx);
7685 }
7686 else
7687 {
7688 /* Exception pending - don't change TOP or the register stack. */
7689 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7690 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7691 }
7692}
7693
7694
7695/**
7696 * Worker routine for raising an FPU stack overflow exception on a push.
7697 *
7698 * @param pFpuCtx The FPU context.
7699 */
7700IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7701{
7702 if (pFpuCtx->FCW & X86_FCW_IM)
7703 {
7704 /* Masked overflow. */
7705 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7706 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7707 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7708 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7709 pFpuCtx->FTW |= RT_BIT(iNewTop);
7710 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7711 iemFpuRotateStackPush(pFpuCtx);
7712 }
7713 else
7714 {
7715 /* Exception pending - don't change TOP or the register stack. */
7716 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7717 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7718 }
7719}
7720
7721
7722/**
7723 * Raises a FPU stack overflow exception on a push.
7724 *
7725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7726 */
7727DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7728{
7729 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7730 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7731 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7732 iemFpuStackPushOverflowOnly(pFpuCtx);
7733}
7734
7735
7736/**
7737 * Raises a FPU stack overflow exception on a push with a memory operand.
7738 *
7739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7740 * @param iEffSeg The effective memory operand selector register.
7741 * @param GCPtrEff The effective memory operand offset.
7742 */
7743DECL_NO_INLINE(IEM_STATIC, void)
7744iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7745{
7746 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7747 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7748 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7749 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7750 iemFpuStackPushOverflowOnly(pFpuCtx);
7751}
7752
7753
7754IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7755{
7756 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7757 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7758 if (pFpuCtx->FTW & RT_BIT(iReg))
7759 return VINF_SUCCESS;
7760 return VERR_NOT_FOUND;
7761}
7762
7763
7764IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7765{
7766 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7767 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7768 if (pFpuCtx->FTW & RT_BIT(iReg))
7769 {
7770 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7771 return VINF_SUCCESS;
7772 }
7773 return VERR_NOT_FOUND;
7774}
7775
7776
7777IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7778 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7779{
7780 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7781 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7782 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7783 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7784 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7785 {
7786 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7787 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7788 return VINF_SUCCESS;
7789 }
7790 return VERR_NOT_FOUND;
7791}
7792
7793
7794IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7795{
7796 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7797 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7798 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7799 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7800 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7801 {
7802 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7803 return VINF_SUCCESS;
7804 }
7805 return VERR_NOT_FOUND;
7806}
7807
7808
7809/**
7810 * Updates the FPU exception status after FCW is changed.
7811 *
7812 * @param pFpuCtx The FPU context.
7813 */
7814IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7815{
7816 uint16_t u16Fsw = pFpuCtx->FSW;
7817 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7818 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7819 else
7820 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7821 pFpuCtx->FSW = u16Fsw;
7822}
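
/*
 * Worked example (illustrative, not part of the build): a pending invalid
 * operation exception (FSW.IE) with FCW.IM clear makes the helper above set
 * FSW.ES and FSW.B; masking the exception and recalculating clears them.
 */
#if 0
static void iemExampleRecalcExceptionStatus(void)
{
    X86FXSTATE FpuCtx;
    RT_ZERO(FpuCtx);
    FpuCtx.FSW = X86_FSW_IE;            /* pending and unmasked (FCW=0) */
    iemFpuRecalcExceptionStatus(&FpuCtx);
    Assert(FpuCtx.FSW & X86_FSW_ES);
    FpuCtx.FCW = X86_FCW_IM;            /* mask it */
    iemFpuRecalcExceptionStatus(&FpuCtx);
    Assert(!(FpuCtx.FSW & X86_FSW_ES));
}
#endif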
7823
7824
7825/**
7826 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7827 *
7828 * @returns The full FTW.
7829 * @param pFpuCtx The FPU context.
7830 */
7831IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7832{
7833 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7834 uint16_t u16Ftw = 0;
7835 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7836 for (unsigned iSt = 0; iSt < 8; iSt++)
7837 {
7838 unsigned const iReg = (iSt + iTop) & 7;
7839 if (!(u8Ftw & RT_BIT(iReg)))
7840 u16Ftw |= 3 << (iReg * 2); /* empty */
7841 else
7842 {
7843 uint16_t uTag;
7844 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7845 if (pr80Reg->s.uExponent == 0x7fff)
7846 uTag = 2; /* Exponent is all 1's => Special. */
7847 else if (pr80Reg->s.uExponent == 0x0000)
7848 {
7849 if (pr80Reg->s.u64Mantissa == 0x0000)
7850 uTag = 1; /* All bits are zero => Zero. */
7851 else
7852 uTag = 2; /* Must be special. */
7853 }
7854 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7855 uTag = 0; /* Valid. */
7856 else
7857 uTag = 2; /* Must be special. */
7858
7859            u16Ftw |= uTag << (iReg * 2);
7860 }
7861 }
7862
7863 return u16Ftw;
7864}
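
/*
 * Worked example (illustrative): the full tag word uses two bits per physical
 * register - 0 = valid, 1 = zero, 2 = special, 3 = empty.  An empty stack
 * therefore yields 0xffff, and a single valid value in ST(0) with TOP=0
 * yields 0xfffc (tag 0 for register 0, tag 3 for the rest).
 */
#if 0
AssertCompile((  (3 <<  0) | (3 <<  2) | (3 <<  4) | (3 <<  6)
               | (3 <<  8) | (3 << 10) | (3 << 12) | (3 << 14)) == 0xffff);
#endif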
7865
7866
7867/**
7868 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7869 *
7870 * @returns The compressed FTW.
7871 * @param u16FullFtw The full FTW to convert.
7872 */
7873IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7874{
7875 uint8_t u8Ftw = 0;
7876 for (unsigned i = 0; i < 8; i++)
7877 {
7878 if ((u16FullFtw & 3) != 3 /*empty*/)
7879 u8Ftw |= RT_BIT(i);
7880 u16FullFtw >>= 2;
7881 }
7882
7883 return u8Ftw;
7884}
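
/*
 * Round-trip sketch (illustrative, not part of the build): compressing the
 * full tag word produced by iemFpuCalcFullFtw gives back the abridged FTW,
 * since every non-empty tag (0, 1 or 2) maps to a set bit and the empty tag
 * (3) to a clear bit.
 */
#if 0
static void iemExampleFtwRoundTrip(PCX86FXSTATE pFpuCtx)
{
    uint16_t const u16FullFtw = iemFpuCalcFullFtw(pFpuCtx);
    Assert(iemFpuCompressFtw(u16FullFtw) == (uint8_t)pFpuCtx->FTW);
}
#endif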
7885
7886/** @} */
7887
7888
7889/** @name Memory access.
7890 *
7891 * @{
7892 */
7893
7894
7895/**
7896 * Updates the IEMCPU::cbWritten counter if applicable.
7897 *
7898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7899 * @param fAccess The access being accounted for.
7900 * @param cbMem The access size.
7901 */
7902DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7903{
7904 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7905 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7906 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7907}
7908
7909
7910/**
7911 * Checks if the given segment can be written to, raise the appropriate
7912 * exception if not.
7913 *
7914 * @returns VBox strict status code.
7915 *
7916 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7917 * @param pHid Pointer to the hidden register.
7918 * @param iSegReg The register number.
7919 * @param pu64BaseAddr Where to return the base address to use for the
7920 * segment. (In 64-bit code it may differ from the
7921 * base in the hidden segment.)
7922 */
7923IEM_STATIC VBOXSTRICTRC
7924iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7925{
7926 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7927 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7928 else
7929 {
7930 if (!pHid->Attr.n.u1Present)
7931 {
7932 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7933 AssertRelease(uSel == 0);
7934 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7935 return iemRaiseGeneralProtectionFault0(pVCpu);
7936 }
7937
7938 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7939 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7940 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7941 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7942 *pu64BaseAddr = pHid->u64Base;
7943 }
7944 return VINF_SUCCESS;
7945}
7946
7947
7948/**
7949 * Checks if the given segment can be read from, raising the appropriate
7950 * exception if not.
7951 *
7952 * @returns VBox strict status code.
7953 *
7954 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7955 * @param pHid Pointer to the hidden register.
7956 * @param iSegReg The register number.
7957 * @param pu64BaseAddr Where to return the base address to use for the
7958 * segment. (In 64-bit code it may differ from the
7959 * base in the hidden segment.)
7960 */
7961IEM_STATIC VBOXSTRICTRC
7962iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7963{
7964 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7965 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7966 else
7967 {
7968 if (!pHid->Attr.n.u1Present)
7969 {
7970 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7971 AssertRelease(uSel == 0);
7972 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7973 return iemRaiseGeneralProtectionFault0(pVCpu);
7974 }
7975
7976 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7977 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7978 *pu64BaseAddr = pHid->u64Base;
7979 }
7980 return VINF_SUCCESS;
7981}
7982
7983
7984/**
7985 * Applies the segment limit, base and attributes.
7986 *
7987 * This may raise a \#GP or \#SS.
7988 *
7989 * @returns VBox strict status code.
7990 *
7991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7992 * @param fAccess The kind of access which is being performed.
7993 * @param iSegReg The index of the segment register to apply.
7994 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7995 * TSS, ++).
7996 * @param cbMem The access size.
7997 * @param pGCPtrMem Pointer to the guest memory address to apply
7998 * segmentation to. Input and output parameter.
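 *
 * @par Example
 *      Illustrative sketch only; GCPtrEff and the surrounding error handling
 *      are hypothetical, and the mapping helpers below already call this
 *      internally:
 * @code
 *      RTGCPTR GCPtrEff = GCPtrMem; // segment-relative address from the decoder
 *      VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_DATA_R, X86_SREG_DS,
 *                                                 sizeof(uint32_t), &GCPtrEff);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict; // the #GP/#SS status is passed up
 *      // GCPtrEff now holds the linear address (offset + DS base outside 64-bit mode).
 * @endcode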
7999 */
8000IEM_STATIC VBOXSTRICTRC
8001iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8002{
8003 if (iSegReg == UINT8_MAX)
8004 return VINF_SUCCESS;
8005
8006 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8007 switch (pVCpu->iem.s.enmCpuMode)
8008 {
8009 case IEMMODE_16BIT:
8010 case IEMMODE_32BIT:
8011 {
8012 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8013 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8014
8015 if ( pSel->Attr.n.u1Present
8016 && !pSel->Attr.n.u1Unusable)
8017 {
8018 Assert(pSel->Attr.n.u1DescType);
8019 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8020 {
8021 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8022 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8023 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8024
8025 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8026 {
8027 /** @todo CPL check. */
8028 }
8029
8030 /*
8031 * There are two kinds of data selectors, normal and expand down.
8032 */
8033 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8034 {
8035 if ( GCPtrFirst32 > pSel->u32Limit
8036 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8037 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8038 }
8039 else
8040 {
8041 /*
8042 * The upper boundary is defined by the B bit, not the G bit!
8043 */
8044 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8045 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8046 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8047 }
8048 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8049 }
8050 else
8051 {
8052
8053 /*
8054 * A code selector can usually be used to read through; writing is
8055 * only permitted in real and V8086 mode.
8056 */
8057 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8058 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8059 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8060 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8061 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8062
8063 if ( GCPtrFirst32 > pSel->u32Limit
8064 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8065 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8066
8067 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8068 {
8069 /** @todo CPL check. */
8070 }
8071
8072 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8073 }
8074 }
8075 else
8076 return iemRaiseGeneralProtectionFault0(pVCpu);
8077 return VINF_SUCCESS;
8078 }
8079
8080 case IEMMODE_64BIT:
8081 {
8082 RTGCPTR GCPtrMem = *pGCPtrMem;
8083 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8084 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8085
8086 Assert(cbMem >= 1);
8087 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8088 return VINF_SUCCESS;
8089 return iemRaiseGeneralProtectionFault0(pVCpu);
8090 }
8091
8092 default:
8093 AssertFailedReturn(VERR_IEM_IPE_7);
8094 }
8095}
8096
8097
8098/**
8099 * Translates a virtual address to a physical address and checks if we
8100 * can access the page as specified.
8101 *
8102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8103 * @param GCPtrMem The virtual address.
8104 * @param fAccess The intended access.
8105 * @param pGCPhysMem Where to return the physical address.
8106 */
8107IEM_STATIC VBOXSTRICTRC
8108iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8109{
8110 /** @todo Need a different PGM interface here. We're currently using
8111 * generic / REM interfaces. This won't cut it for R0 & RC. */
8112 RTGCPHYS GCPhys;
8113 uint64_t fFlags;
8114 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8115 if (RT_FAILURE(rc))
8116 {
8117 /** @todo Check unassigned memory in unpaged mode. */
8118 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8119 *pGCPhysMem = NIL_RTGCPHYS;
8120 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8121 }
8122
8123 /* If the page is writable and does not have the no-exec bit set, all
8124 access is allowed. Otherwise we'll have to check more carefully... */
8125 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8126 {
8127 /* Write to read only memory? */
8128 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8129 && !(fFlags & X86_PTE_RW)
8130 && ( (pVCpu->iem.s.uCpl == 3
8131 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8132 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8133 {
8134 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8135 *pGCPhysMem = NIL_RTGCPHYS;
8136 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8137 }
8138
8139 /* Kernel memory accessed by userland? */
8140 if ( !(fFlags & X86_PTE_US)
8141 && pVCpu->iem.s.uCpl == 3
8142 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8143 {
8144 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8145 *pGCPhysMem = NIL_RTGCPHYS;
8146 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8147 }
8148
8149 /* Executing non-executable memory? */
8150 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8151 && (fFlags & X86_PTE_PAE_NX)
8152 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8153 {
8154 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8155 *pGCPhysMem = NIL_RTGCPHYS;
8156 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8157 VERR_ACCESS_DENIED);
8158 }
8159 }
8160
8161 /*
8162 * Set the dirty / access flags.
8163 * ASSUMES this is set when the address is translated rather than on commit...
8164 */
8165 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8166 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8167 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8168 {
8169 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8170 AssertRC(rc2);
8171 }
8172
8173 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8174 *pGCPhysMem = GCPhys;
8175 return VINF_SUCCESS;
8176}
8177
8178
8179
8180/**
8181 * Maps a physical page.
8182 *
8183 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8185 * @param GCPhysMem The physical address.
8186 * @param fAccess The intended access.
8187 * @param ppvMem Where to return the mapping address.
8188 * @param pLock The PGM lock.
8189 */
8190IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8191{
8192#ifdef IEM_VERIFICATION_MODE_FULL
8193 /* Force the alternative path so we can ignore writes. */
8194 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8195 {
8196 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8197 {
8198 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8199 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8200 if (RT_FAILURE(rc2))
8201 pVCpu->iem.s.fProblematicMemory = true;
8202 }
8203 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8204 }
8205#endif
8206#ifdef IEM_LOG_MEMORY_WRITES
8207 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8208 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8209#endif
8210#ifdef IEM_VERIFICATION_MODE_MINIMAL
8211 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8212#endif
8213
8214 /** @todo This API may require some improving later. A private deal with PGM
8215 * regarding locking and unlocking needs to be struck. A couple of TLBs
8216 * living in PGM, but with publicly accessible inlined access methods
8217 * could perhaps be an even better solution. */
8218 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8219 GCPhysMem,
8220 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8221 pVCpu->iem.s.fBypassHandlers,
8222 ppvMem,
8223 pLock);
8224 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8225 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8226
8227#ifdef IEM_VERIFICATION_MODE_FULL
8228 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8229 pVCpu->iem.s.fProblematicMemory = true;
8230#endif
8231 return rc;
8232}
8233
8234
8235/**
8236 * Unmaps a page previously mapped by iemMemPageMap.
8237 *
8238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8239 * @param GCPhysMem The physical address.
8240 * @param fAccess The intended access.
8241 * @param pvMem What iemMemPageMap returned.
8242 * @param pLock The PGM lock.
8243 */
8244DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8245{
8246 NOREF(pVCpu);
8247 NOREF(GCPhysMem);
8248 NOREF(fAccess);
8249 NOREF(pvMem);
8250 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8251}
8252
8253
8254/**
8255 * Looks up a memory mapping entry.
8256 *
8257 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8259 * @param pvMem The memory address.
8260 * @param fAccess The kind of access to match (IEM_ACCESS_XXX).
8261 */
8262DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8263{
8264 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8265 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8266 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8267 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8268 return 0;
8269 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8270 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8271 return 1;
8272 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8273 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8274 return 2;
8275 return VERR_NOT_FOUND;
8276}
8277
8278
8279/**
8280 * Finds a free memmap entry when using iNextMapping doesn't work.
8281 *
8282 * @returns Memory mapping index, 1024 on failure.
8283 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8284 */
8285IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8286{
8287 /*
8288 * The easy case.
8289 */
8290 if (pVCpu->iem.s.cActiveMappings == 0)
8291 {
8292 pVCpu->iem.s.iNextMapping = 1;
8293 return 0;
8294 }
8295
8296 /* There should be enough mappings for all instructions. */
8297 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8298
8299 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8300 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8301 return i;
8302
8303 AssertFailedReturn(1024);
8304}
8305
8306
8307/**
8308 * Commits a bounce buffer that needs writing back and unmaps it.
8309 *
8310 * @returns Strict VBox status code.
8311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8312 * @param iMemMap The index of the buffer to commit.
8313 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8314 * Always false in ring-3, obviously.
8315 */
8316IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8317{
8318 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8319 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8320#ifdef IN_RING3
8321 Assert(!fPostponeFail);
8322 RT_NOREF_PV(fPostponeFail);
8323#endif
8324
8325 /*
8326 * Do the writing.
8327 */
8328#ifndef IEM_VERIFICATION_MODE_MINIMAL
8329 PVM pVM = pVCpu->CTX_SUFF(pVM);
8330 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8331 && !IEM_VERIFICATION_ENABLED(pVCpu))
8332 {
8333 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8334 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8335 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8336 if (!pVCpu->iem.s.fBypassHandlers)
8337 {
8338 /*
8339 * Carefully and efficiently dealing with access handler return
8340 * codes makes this a little bloated.
8341 */
8342 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8343 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8344 pbBuf,
8345 cbFirst,
8346 PGMACCESSORIGIN_IEM);
8347 if (rcStrict == VINF_SUCCESS)
8348 {
8349 if (cbSecond)
8350 {
8351 rcStrict = PGMPhysWrite(pVM,
8352 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8353 pbBuf + cbFirst,
8354 cbSecond,
8355 PGMACCESSORIGIN_IEM);
8356 if (rcStrict == VINF_SUCCESS)
8357 { /* nothing */ }
8358 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8359 {
8360 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8361 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8362 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8363 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8364 }
8365# ifndef IN_RING3
8366 else if (fPostponeFail)
8367 {
8368 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8369 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8370 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8371 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8372 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8373 return iemSetPassUpStatus(pVCpu, rcStrict);
8374 }
8375# endif
8376 else
8377 {
8378 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8379 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8380 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8381 return rcStrict;
8382 }
8383 }
8384 }
8385 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8386 {
8387 if (!cbSecond)
8388 {
8389 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8390 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8391 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8392 }
8393 else
8394 {
8395 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8396 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8397 pbBuf + cbFirst,
8398 cbSecond,
8399 PGMACCESSORIGIN_IEM);
8400 if (rcStrict2 == VINF_SUCCESS)
8401 {
8402 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8403 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8404 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8405 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8406 }
8407 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8408 {
8409 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8410 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8411 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8412 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8413 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8414 }
8415# ifndef IN_RING3
8416 else if (fPostponeFail)
8417 {
8418 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8419 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8420 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8421 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8422 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8423 return iemSetPassUpStatus(pVCpu, rcStrict);
8424 }
8425# endif
8426 else
8427 {
8428 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8429 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8430 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8431 return rcStrict2;
8432 }
8433 }
8434 }
8435# ifndef IN_RING3
8436 else if (fPostponeFail)
8437 {
8438 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8439 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8440 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8441 if (!cbSecond)
8442 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8443 else
8444 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8445 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8446 return iemSetPassUpStatus(pVCpu, rcStrict);
8447 }
8448# endif
8449 else
8450 {
8451 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8452 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8453 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8454 return rcStrict;
8455 }
8456 }
8457 else
8458 {
8459 /*
8460 * No access handlers, much simpler.
8461 */
8462 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8463 if (RT_SUCCESS(rc))
8464 {
8465 if (cbSecond)
8466 {
8467 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8468 if (RT_SUCCESS(rc))
8469 { /* likely */ }
8470 else
8471 {
8472 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8473 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8474 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8475 return rc;
8476 }
8477 }
8478 }
8479 else
8480 {
8481 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8482 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8483 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8484 return rc;
8485 }
8486 }
8487 }
8488#endif
8489
8490#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8491 /*
8492 * Record the write(s).
8493 */
8494 if (!pVCpu->iem.s.fNoRem)
8495 {
8496 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8497 if (pEvtRec)
8498 {
8499 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8500 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8501 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8502 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8503 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8504 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8505 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8506 }
8507 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8508 {
8509 pEvtRec = iemVerifyAllocRecord(pVCpu);
8510 if (pEvtRec)
8511 {
8512 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8513 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8514 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8515 memcpy(pEvtRec->u.RamWrite.ab,
8516 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8517 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8518 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8519 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8520 }
8521 }
8522 }
8523#endif
8524#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8525 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8526 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8527 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8528 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8529 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8530 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8531
8532 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8533 g_cbIemWrote = cbWrote;
8534 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8535#endif
8536
8537 /*
8538 * Free the mapping entry.
8539 */
8540 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8541 Assert(pVCpu->iem.s.cActiveMappings != 0);
8542 pVCpu->iem.s.cActiveMappings--;
8543 return VINF_SUCCESS;
8544}
8545
8546
8547/**
8548 * iemMemMap worker that deals with a request crossing pages.
8549 */
8550IEM_STATIC VBOXSTRICTRC
8551iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8552{
8553 /*
8554 * Do the address translations.
8555 */
8556 RTGCPHYS GCPhysFirst;
8557 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8558 if (rcStrict != VINF_SUCCESS)
8559 return rcStrict;
8560
8561 RTGCPHYS GCPhysSecond;
8562 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8563 fAccess, &GCPhysSecond);
8564 if (rcStrict != VINF_SUCCESS)
8565 return rcStrict;
8566 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8567
8568 PVM pVM = pVCpu->CTX_SUFF(pVM);
8569#ifdef IEM_VERIFICATION_MODE_FULL
8570 /*
8571 * Detect problematic memory when verifying so we can select
8572 * the right execution engine. (TLB: Redo this.)
8573 */
8574 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8575 {
8576 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8577 if (RT_SUCCESS(rc2))
8578 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8579 if (RT_FAILURE(rc2))
8580 pVCpu->iem.s.fProblematicMemory = true;
8581 }
8582#endif
8583
8584
8585 /*
8586 * Read in the current memory content if it's a read, execute or partial
8587 * write access.
8588 */
8589 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8590 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8591 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8592
8593 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8594 {
8595 if (!pVCpu->iem.s.fBypassHandlers)
8596 {
8597 /*
8598 * Must carefully deal with access handler status codes here,
8599 * which makes the code a bit bloated.
8600 */
8601 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8602 if (rcStrict == VINF_SUCCESS)
8603 {
8604 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8605 if (rcStrict == VINF_SUCCESS)
8606 { /*likely */ }
8607 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8608 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8609 else
8610 {
8611 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8612 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8613 return rcStrict;
8614 }
8615 }
8616 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8617 {
8618 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8619 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8620 {
8621 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8622 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8623 }
8624 else
8625 {
8626 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8627 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8628 return rcStrict2;
8629 }
8630 }
8631 else
8632 {
8633 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8634 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8635 return rcStrict;
8636 }
8637 }
8638 else
8639 {
8640 /*
8641 * No informational status codes here, much more straightforward.
8642 */
8643 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8644 if (RT_SUCCESS(rc))
8645 {
8646 Assert(rc == VINF_SUCCESS);
8647 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8648 if (RT_SUCCESS(rc))
8649 Assert(rc == VINF_SUCCESS);
8650 else
8651 {
8652 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8653 return rc;
8654 }
8655 }
8656 else
8657 {
8658 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8659 return rc;
8660 }
8661 }
8662
8663#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8664 if ( !pVCpu->iem.s.fNoRem
8665 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8666 {
8667 /*
8668 * Record the reads.
8669 */
8670 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8671 if (pEvtRec)
8672 {
8673 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8674 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8675 pEvtRec->u.RamRead.cb = cbFirstPage;
8676 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8677 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8678 }
8679 pEvtRec = iemVerifyAllocRecord(pVCpu);
8680 if (pEvtRec)
8681 {
8682 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8683 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8684 pEvtRec->u.RamRead.cb = cbSecondPage;
8685 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8686 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8687 }
8688 }
8689#endif
8690 }
8691#ifdef VBOX_STRICT
8692 else
8693 memset(pbBuf, 0xcc, cbMem);
8694 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8695 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8696#endif
8697
8698 /*
8699 * Commit the bounce buffer entry.
8700 */
8701 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8702 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8703 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8704 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8705 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8706 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8707 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8708 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8709 pVCpu->iem.s.cActiveMappings++;
8710
8711 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8712 *ppvMem = pbBuf;
8713 return VINF_SUCCESS;
8714}
8715
8716
8717/**
8718 * iemMemMap worker that deals with iemMemPageMap failures.
8719 */
8720IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8721 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8722{
8723 /*
8724 * Filter out conditions we can handle and the ones which shouldn't happen.
8725 */
8726 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8727 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8728 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8729 {
8730 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8731 return rcMap;
8732 }
8733 pVCpu->iem.s.cPotentialExits++;
8734
8735 /*
8736 * Read in the current memory content if it's a read, execute or partial
8737 * write access.
8738 */
8739 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8740 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8741 {
8742 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8743 memset(pbBuf, 0xff, cbMem);
8744 else
8745 {
8746 int rc;
8747 if (!pVCpu->iem.s.fBypassHandlers)
8748 {
8749 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8750 if (rcStrict == VINF_SUCCESS)
8751 { /* nothing */ }
8752 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8753 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8754 else
8755 {
8756 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8757 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8758 return rcStrict;
8759 }
8760 }
8761 else
8762 {
8763 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8764 if (RT_SUCCESS(rc))
8765 { /* likely */ }
8766 else
8767 {
8768 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8769 GCPhysFirst, rc));
8770 return rc;
8771 }
8772 }
8773 }
8774
8775#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8776 if ( !pVCpu->iem.s.fNoRem
8777 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8778 {
8779 /*
8780 * Record the read.
8781 */
8782 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8783 if (pEvtRec)
8784 {
8785 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8786 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8787 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8788 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8789 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8790 }
8791 }
8792#endif
8793 }
8794#ifdef VBOX_STRICT
8795 else
8796 memset(pbBuf, 0xcc, cbMem);
8797#endif
8798#ifdef VBOX_STRICT
8799 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8800 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8801#endif
8802
8803 /*
8804 * Commit the bounce buffer entry.
8805 */
8806 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8807 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8808 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8809 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8810 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8811 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8812 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8813 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8814 pVCpu->iem.s.cActiveMappings++;
8815
8816 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8817 *ppvMem = pbBuf;
8818 return VINF_SUCCESS;
8819}
8820
8821
8822
8823/**
8824 * Maps the specified guest memory for the given kind of access.
8825 *
8826 * This may be using bounce buffering of the memory if it's crossing a page
8827 * boundary or if there is an access handler installed for any of it. Because
8828 * of lock prefix guarantees, we're in for some extra clutter when this
8829 * happens.
8830 *
8831 * This may raise a \#GP, \#SS, \#PF or \#AC.
8832 *
8833 * @returns VBox strict status code.
8834 *
8835 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8836 * @param ppvMem Where to return the pointer to the mapped
8837 * memory.
8838 * @param cbMem The number of bytes to map. This is usually 1,
8839 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8840 * string operations it can be up to a page.
8841 * @param iSegReg The index of the segment register to use for
8842 * this access. The base and limits are checked.
8843 * Use UINT8_MAX to indicate that no segmentation
8844 * is required (for IDT, GDT and LDT accesses).
8845 * @param GCPtrMem The address of the guest memory.
8846 * @param fAccess How the memory is being accessed. The
8847 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8848 * how to map the memory, while the
8849 * IEM_ACCESS_WHAT_XXX bit is used when raising
8850 * exceptions.
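 *
 * @par Example
 *      A minimal usage sketch (u32Value and the surrounding instruction body
 *      are hypothetical); it is the mirror image of the data fetch helpers
 *      further down, adapted for a write:
 * @code
 *      uint32_t *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                        iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;  // hypothetical value to store
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *      }
 *      return rcStrict;
 * @endcode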
8851 */
8852IEM_STATIC VBOXSTRICTRC
8853iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8854{
8855 /*
8856 * Check the input and figure out which mapping entry to use.
8857 */
8858 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8859 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8860 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8861
8862 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8863 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8864 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8865 {
8866 iMemMap = iemMemMapFindFree(pVCpu);
8867 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8868 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8869 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8870 pVCpu->iem.s.aMemMappings[2].fAccess),
8871 VERR_IEM_IPE_9);
8872 }
8873
8874 /*
8875 * Map the memory, checking that we can actually access it. If something
8876 * slightly complicated happens, fall back on bounce buffering.
8877 */
8878 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8879 if (rcStrict != VINF_SUCCESS)
8880 return rcStrict;
8881
8882 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8883 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8884
8885 RTGCPHYS GCPhysFirst;
8886 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8887 if (rcStrict != VINF_SUCCESS)
8888 return rcStrict;
8889
8890 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8891 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8892 if (fAccess & IEM_ACCESS_TYPE_READ)
8893 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8894
8895 void *pvMem;
8896 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8897 if (rcStrict != VINF_SUCCESS)
8898 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8899
8900 /*
8901 * Fill in the mapping table entry.
8902 */
8903 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8904 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8905 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8906 pVCpu->iem.s.cActiveMappings++;
8907
8908 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8909 *ppvMem = pvMem;
8910 return VINF_SUCCESS;
8911}
8912
8913
8914/**
8915 * Commits the guest memory if bounce buffered and unmaps it.
8916 *
8917 * @returns Strict VBox status code.
8918 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8919 * @param pvMem The mapping.
8920 * @param fAccess The kind of access.
8921 */
8922IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8923{
8924 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8925 AssertReturn(iMemMap >= 0, iMemMap);
8926
8927 /* If it's bounce buffered, we may need to write back the buffer. */
8928 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8929 {
8930 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8931 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8932 }
8933 /* Otherwise unlock it. */
8934 else
8935 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8936
8937 /* Free the entry. */
8938 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8939 Assert(pVCpu->iem.s.cActiveMappings != 0);
8940 pVCpu->iem.s.cActiveMappings--;
8941 return VINF_SUCCESS;
8942}
8943
8944#ifdef IEM_WITH_SETJMP
8945
8946/**
8947 * Maps the specified guest memory for the given kind of access, longjmp on
8948 * error.
8949 *
8950 * This may be using bounce buffering of the memory if it's crossing a page
8951 * boundary or if there is an access handler installed for any of it. Because
8952 * of lock prefix guarantees, we're in for some extra clutter when this
8953 * happens.
8954 *
8955 * This may raise a \#GP, \#SS, \#PF or \#AC.
8956 *
8957 * @returns Pointer to the mapped memory.
8958 *
8959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8960 * @param cbMem The number of bytes to map. This is usually 1,
8961 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8962 * string operations it can be up to a page.
8963 * @param iSegReg The index of the segment register to use for
8964 * this access. The base and limits are checked.
8965 * Use UINT8_MAX to indicate that no segmentation
8966 * is required (for IDT, GDT and LDT accesses).
8967 * @param GCPtrMem The address of the guest memory.
8968 * @param fAccess How the memory is being accessed. The
8969 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8970 * how to map the memory, while the
8971 * IEM_ACCESS_WHAT_XXX bit is used when raising
8972 * exceptions.
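 *
 * @par Example
 *      Sketch of the longjmp flavour (u32Value is hypothetical); failures do
 *      not return here but longjmp all the way out:
 * @code
 *      uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg,
 *                                                   GCPtrMem, IEM_ACCESS_DATA_W);
 *      *pu32Dst = u32Value;
 *      iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 * @endcode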
8973 */
8974IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8975{
8976 /*
8977 * Check the input and figure out which mapping entry to use.
8978 */
8979 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8980 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8981 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8982
8983 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8984 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8985 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8986 {
8987 iMemMap = iemMemMapFindFree(pVCpu);
8988 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8989 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8990 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8991 pVCpu->iem.s.aMemMappings[2].fAccess),
8992 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8993 }
8994
8995 /*
8996 * Map the memory, checking that we can actually access it. If something
8997 * slightly complicated happens, fall back on bounce buffering.
8998 */
8999 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9000 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9001 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9002
9003 /* Crossing a page boundary? */
9004 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
9005 { /* No (likely). */ }
9006 else
9007 {
9008 void *pvMem;
9009 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9010 if (rcStrict == VINF_SUCCESS)
9011 return pvMem;
9012 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9013 }
9014
9015 RTGCPHYS GCPhysFirst;
9016 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9017 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9018 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9019
9020 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9021 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9022 if (fAccess & IEM_ACCESS_TYPE_READ)
9023 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9024
9025 void *pvMem;
9026 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9027 if (rcStrict == VINF_SUCCESS)
9028 { /* likely */ }
9029 else
9030 {
9031 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9032 if (rcStrict == VINF_SUCCESS)
9033 return pvMem;
9034 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9035 }
9036
9037 /*
9038 * Fill in the mapping table entry.
9039 */
9040 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9041 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9042 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9043 pVCpu->iem.s.cActiveMappings++;
9044
9045 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9046 return pvMem;
9047}
9048
9049
9050/**
9051 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9052 *
9053 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9054 * @param pvMem The mapping.
9055 * @param fAccess The kind of access.
9056 */
9057IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9058{
9059 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9060 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9061
9062 /* If it's bounce buffered, we may need to write back the buffer. */
9063 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9064 {
9065 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9066 {
9067 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9068 if (rcStrict == VINF_SUCCESS)
9069 return;
9070 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9071 }
9072 }
9073 /* Otherwise unlock it. */
9074 else
9075 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9076
9077 /* Free the entry. */
9078 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9079 Assert(pVCpu->iem.s.cActiveMappings != 0);
9080 pVCpu->iem.s.cActiveMappings--;
9081}
9082
9083#endif
9084
9085#ifndef IN_RING3
9086/**
9087 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9088 * buffer part runs into trouble, the write is postponed to ring-3 (sets FF and stuff).
9089 *
9090 * Allows the instruction to be completed and retired, while the IEM user will
9091 * return to ring-3 immediately afterwards and do the postponed writes there.
9092 *
9093 * @returns VBox status code (no strict statuses). Caller must check
9094 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9096 * @param pvMem The mapping.
9097 * @param fAccess The kind of access.
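 *
 * @par Example
 *      Rough caller sketch; the string-loop context and pu32Dst are
 *      hypothetical:
 * @code
 *      rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
 *          return VINF_SUCCESS; // stop iterating; ring-3 will do the postponed write
 * @endcode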
9098 */
9099IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9100{
9101 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9102 AssertReturn(iMemMap >= 0, iMemMap);
9103
9104 /* If it's bounce buffered, we may need to write back the buffer. */
9105 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9106 {
9107 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9108 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9109 }
9110 /* Otherwise unlock it. */
9111 else
9112 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9113
9114 /* Free the entry. */
9115 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9116 Assert(pVCpu->iem.s.cActiveMappings != 0);
9117 pVCpu->iem.s.cActiveMappings--;
9118 return VINF_SUCCESS;
9119}
9120#endif
9121
9122
9123/**
9124 * Rolls back mappings, releasing page locks and such.
9125 *
9126 * The caller shall only call this after checking cActiveMappings.
9127 *
9129 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9130 */
9131IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9132{
9133 Assert(pVCpu->iem.s.cActiveMappings > 0);
9134
9135 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9136 while (iMemMap-- > 0)
9137 {
9138 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9139 if (fAccess != IEM_ACCESS_INVALID)
9140 {
9141 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9142 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9143 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9144 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9145 Assert(pVCpu->iem.s.cActiveMappings > 0);
9146 pVCpu->iem.s.cActiveMappings--;
9147 }
9148 }
9149}
9150
9151
9152/**
9153 * Fetches a data byte.
9154 *
9155 * @returns Strict VBox status code.
9156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9157 * @param pu8Dst Where to return the byte.
9158 * @param iSegReg The index of the segment register to use for
9159 * this access. The base and limits are checked.
9160 * @param GCPtrMem The address of the guest memory.
9161 */
9162IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9163{
9164 /* The lazy approach for now... */
9165 uint8_t const *pu8Src;
9166 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9167 if (rc == VINF_SUCCESS)
9168 {
9169 *pu8Dst = *pu8Src;
9170 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9171 }
9172 return rc;
9173}
9174
9175
9176#ifdef IEM_WITH_SETJMP
9177/**
9178 * Fetches a data byte, longjmp on error.
9179 *
9180 * @returns The byte.
9181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9182 * @param iSegReg The index of the segment register to use for
9183 * this access. The base and limits are checked.
9184 * @param GCPtrMem The address of the guest memory.
9185 */
9186DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9187{
9188 /* The lazy approach for now... */
9189 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9190 uint8_t const bRet = *pu8Src;
9191 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9192 return bRet;
9193}
9194#endif /* IEM_WITH_SETJMP */
9195
9196
9197/**
9198 * Fetches a data word.
9199 *
9200 * @returns Strict VBox status code.
9201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9202 * @param pu16Dst Where to return the word.
9203 * @param iSegReg The index of the segment register to use for
9204 * this access. The base and limits are checked.
9205 * @param GCPtrMem The address of the guest memory.
9206 */
9207IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9208{
9209 /* The lazy approach for now... */
9210 uint16_t const *pu16Src;
9211 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9212 if (rc == VINF_SUCCESS)
9213 {
9214 *pu16Dst = *pu16Src;
9215 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9216 }
9217 return rc;
9218}
9219
9220
9221#ifdef IEM_WITH_SETJMP
9222/**
9223 * Fetches a data word, longjmp on error.
9224 *
9225 * @returns The word.
9226 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9227 * @param iSegReg The index of the segment register to use for
9228 * this access. The base and limits are checked.
9229 * @param GCPtrMem The address of the guest memory.
9230 */
9231DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9232{
9233 /* The lazy approach for now... */
9234 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9235 uint16_t const u16Ret = *pu16Src;
9236 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9237 return u16Ret;
9238}
9239#endif
9240
9241
9242/**
9243 * Fetches a data dword.
9244 *
9245 * @returns Strict VBox status code.
9246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9247 * @param pu32Dst Where to return the dword.
9248 * @param iSegReg The index of the segment register to use for
9249 * this access. The base and limits are checked.
9250 * @param GCPtrMem The address of the guest memory.
9251 */
9252IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9253{
9254 /* The lazy approach for now... */
9255 uint32_t const *pu32Src;
9256 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9257 if (rc == VINF_SUCCESS)
9258 {
9259 *pu32Dst = *pu32Src;
9260 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9261 }
9262 return rc;
9263}
9264
9265
9266#ifdef IEM_WITH_SETJMP
9267
9268IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9269{
9270 Assert(cbMem >= 1);
9271 Assert(iSegReg < X86_SREG_COUNT);
9272
9273 /*
9274 * 64-bit mode is simpler.
9275 */
9276 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9277 {
9278 if (iSegReg >= X86_SREG_FS)
9279 {
9280 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9281 GCPtrMem += pSel->u64Base;
9282 }
9283
9284 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9285 return GCPtrMem;
9286 }
9287 /*
9288 * 16-bit and 32-bit segmentation.
9289 */
9290 else
9291 {
9292 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9293 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9294 == X86DESCATTR_P /* data, expand up */
9295 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9296 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9297 {
9298 /* expand up */
9299 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9300 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9301 && GCPtrLast32 > (uint32_t)GCPtrMem))
9302 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9303 }
9304 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9305 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9306 {
9307 /* expand down */
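            /* E.g. with u32Limit=0x0fff and the B bit set, the valid window is
               0x1000..0xffffffff: the first byte must lie above the limit and
               the last byte must neither exceed the upper bound nor wrap. */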
9308 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9309 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9310 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9311 && GCPtrLast32 > (uint32_t)GCPtrMem))
9312 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9313 }
9314 else
9315 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9316 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9317 }
9318 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9319}
9320
9321
9322IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9323{
9324 Assert(cbMem >= 1);
9325 Assert(iSegReg < X86_SREG_COUNT);
9326
9327 /*
9328 * 64-bit mode is simpler.
9329 */
9330 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9331 {
9332 if (iSegReg >= X86_SREG_FS)
9333 {
9334 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9335 GCPtrMem += pSel->u64Base;
9336 }
9337
9338 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9339 return GCPtrMem;
9340 }
9341 /*
9342 * 16-bit and 32-bit segmentation.
9343 */
9344 else
9345 {
9346 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9347 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9348 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9349 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9350 {
9351 /* expand up */
9352 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9353 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9354 && GCPtrLast32 > (uint32_t)GCPtrMem))
9355 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9356 }
9357 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9358 {
9359 /* expand down */
9360 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9361 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9362 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9363 && GCPtrLast32 > (uint32_t)GCPtrMem))
9364 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9365 }
9366 else
9367 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9368 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9369 }
9370 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9371}
9372
9373
9374/**
9375 * Fetches a data dword, longjmp on error, fallback/safe version.
9376 *
9377 * @returns The dword
9378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9379 * @param iSegReg The index of the segment register to use for
9380 * this access. The base and limits are checked.
9381 * @param GCPtrMem The address of the guest memory.
9382 */
9383IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9384{
9385 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9386 uint32_t const u32Ret = *pu32Src;
9387 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9388 return u32Ret;
9389}
9390
9391
9392/**
9393 * Fetches a data dword, longjmp on error.
9394 *
9395 * @returns The dword
9396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9397 * @param iSegReg The index of the segment register to use for
9398 * this access. The base and limits are checked.
9399 * @param GCPtrMem The address of the guest memory.
9400 */
9401DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9402{
9403# ifdef IEM_WITH_DATA_TLB
9404 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9405 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9406 {
9407 /// @todo more later.
9408 }
9409
9410 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9411# else
9412 /* The lazy approach. */
9413 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9414 uint32_t const u32Ret = *pu32Src;
9415 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9416 return u32Ret;
9417# endif
9418}
9419#endif
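
/*
 * Illustrative sketch (not part of IEM): the page-crossing test gating the
 * data TLB fast path above.  A fixed-size access can be served from a single
 * page mapping only when it does not straddle a 4 KiB page boundary;
 * otherwise the safe fallback is used.  Hypothetical stand-alone helper:
 */
#if 0 /* sketch only, not built */
static bool sketchFitsInOnePage(uint64_t GCPtrEff, size_t cbAccess)
{
    uint64_t const offPage = GCPtrEff & 0xfff;  /* page offset (X86_PAGE_OFFSET_MASK) */
    return offPage <= 0x1000 - cbAccess;        /* X86_PAGE_SIZE - cbAccess */
}
/* Example: a dword starting at page offset 0xffd spills one byte into the
   next page, while one starting at 0xffc still fits. */
#endif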
9420
9421
9422#ifdef SOME_UNUSED_FUNCTION
9423/**
9424 * Fetches a data dword and sign extends it to a qword.
9425 *
9426 * @returns Strict VBox status code.
9427 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9428 * @param pu64Dst Where to return the sign extended value.
9429 * @param iSegReg The index of the segment register to use for
9430 * this access. The base and limits are checked.
9431 * @param GCPtrMem The address of the guest memory.
9432 */
9433IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9434{
9435 /* The lazy approach for now... */
9436 int32_t const *pi32Src;
9437 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9438 if (rc == VINF_SUCCESS)
9439 {
9440 *pu64Dst = *pi32Src;
9441 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9442 }
9443#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9444 else
9445 *pu64Dst = 0;
9446#endif
9447 return rc;
9448}
9449#endif
9450
9451
9452/**
9453 * Fetches a data qword.
9454 *
9455 * @returns Strict VBox status code.
9456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9457 * @param pu64Dst Where to return the qword.
9458 * @param iSegReg The index of the segment register to use for
9459 * this access. The base and limits are checked.
9460 * @param GCPtrMem The address of the guest memory.
9461 */
9462IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9463{
9464 /* The lazy approach for now... */
9465 uint64_t const *pu64Src;
9466 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9467 if (rc == VINF_SUCCESS)
9468 {
9469 *pu64Dst = *pu64Src;
9470 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9471 }
9472 return rc;
9473}
9474
9475
9476#ifdef IEM_WITH_SETJMP
9477/**
9478 * Fetches a data qword, longjmp on error.
9479 *
9480 * @returns The qword.
9481 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9482 * @param iSegReg The index of the segment register to use for
9483 * this access. The base and limits are checked.
9484 * @param GCPtrMem The address of the guest memory.
9485 */
9486DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9487{
9488 /* The lazy approach for now... */
9489 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9490 uint64_t const u64Ret = *pu64Src;
9491 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9492 return u64Ret;
9493}
9494#endif
9495
9496
9497/**
9498 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9499 *
9500 * @returns Strict VBox status code.
9501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9502 * @param pu64Dst Where to return the qword.
9503 * @param iSegReg The index of the segment register to use for
9504 * this access. The base and limits are checked.
9505 * @param GCPtrMem The address of the guest memory.
9506 */
9507IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9508{
9509 /* The lazy approach for now... */
9510 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9511 if (RT_UNLIKELY(GCPtrMem & 15))
9512 return iemRaiseGeneralProtectionFault0(pVCpu);
9513
9514 uint64_t const *pu64Src;
9515 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9516 if (rc == VINF_SUCCESS)
9517 {
9518 *pu64Dst = *pu64Src;
9519 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9520 }
9521 return rc;
9522}
9523
9524
9525#ifdef IEM_WITH_SETJMP
9526/**
9527 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9528 *
9529 * @returns The qword.
9530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9531 * @param iSegReg The index of the segment register to use for
9532 * this access. The base and limits are checked.
9533 * @param GCPtrMem The address of the guest memory.
9534 */
9535DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9536{
9537 /* The lazy approach for now... */
9538 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9539 if (RT_LIKELY(!(GCPtrMem & 15)))
9540 {
9541 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9542 uint64_t const u64Ret = *pu64Src;
9543 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9544 return u64Ret;
9545 }
9546
9547 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9548 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9549}
9550#endif
9551
9552
9553/**
9554 * Fetches a data tword.
9555 *
9556 * @returns Strict VBox status code.
9557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9558 * @param pr80Dst Where to return the tword.
9559 * @param iSegReg The index of the segment register to use for
9560 * this access. The base and limits are checked.
9561 * @param GCPtrMem The address of the guest memory.
9562 */
9563IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9564{
9565 /* The lazy approach for now... */
9566 PCRTFLOAT80U pr80Src;
9567 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9568 if (rc == VINF_SUCCESS)
9569 {
9570 *pr80Dst = *pr80Src;
9571 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9572 }
9573 return rc;
9574}
9575
9576
9577#ifdef IEM_WITH_SETJMP
9578/**
9579 * Fetches a data tword, longjmp on error.
9580 *
9581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9582 * @param pr80Dst Where to return the tword.
9583 * @param iSegReg The index of the segment register to use for
9584 * this access. The base and limits are checked.
9585 * @param GCPtrMem The address of the guest memory.
9586 */
9587DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9588{
9589 /* The lazy approach for now... */
9590 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9591 *pr80Dst = *pr80Src;
9592 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9593}
9594#endif
9595
9596
9597/**
9598 * Fetches a data dqword (double qword), generally SSE related.
9599 *
9600 * @returns Strict VBox status code.
9601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9602 * @param pu128Dst Where to return the dqword.
9603 * @param iSegReg The index of the segment register to use for
9604 * this access. The base and limits are checked.
9605 * @param GCPtrMem The address of the guest memory.
9606 */
9607IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9608{
9609 /* The lazy approach for now... */
9610 PCRTUINT128U pu128Src;
9611 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9612 if (rc == VINF_SUCCESS)
9613 {
9614 pu128Dst->au64[0] = pu128Src->au64[0];
9615 pu128Dst->au64[1] = pu128Src->au64[1];
9616 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9617 }
9618 return rc;
9619}
9620
9621
9622#ifdef IEM_WITH_SETJMP
9623/**
9624 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9625 *
9626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9627 * @param pu128Dst Where to return the dqword.
9628 * @param iSegReg The index of the segment register to use for
9629 * this access. The base and limits are checked.
9630 * @param GCPtrMem The address of the guest memory.
9631 */
9632IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9633{
9634 /* The lazy approach for now... */
9635 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9636 pu128Dst->au64[0] = pu128Src->au64[0];
9637 pu128Dst->au64[1] = pu128Src->au64[1];
9638 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9639}
9640#endif
9641
9642
9643/**
9644 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9645 * related.
9646 *
9647 * Raises \#GP(0) if not aligned.
9648 *
9649 * @returns Strict VBox status code.
9650 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9651 * @param pu128Dst Where to return the dqword.
9652 * @param iSegReg The index of the segment register to use for
9653 * this access. The base and limits are checked.
9654 * @param GCPtrMem The address of the guest memory.
9655 */
9656IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9657{
9658 /* The lazy approach for now... */
9659 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9660 if ( (GCPtrMem & 15)
9661 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9662 return iemRaiseGeneralProtectionFault0(pVCpu);
9663
9664 PCRTUINT128U pu128Src;
9665 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9666 if (rc == VINF_SUCCESS)
9667 {
9668 pu128Dst->au64[0] = pu128Src->au64[0];
9669 pu128Dst->au64[1] = pu128Src->au64[1];
9670 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9671 }
9672 return rc;
9673}
9674
9675
9676#ifdef IEM_WITH_SETJMP
9677/**
9678 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9679 * related, longjmp on error.
9680 *
9681 * Raises \#GP(0) if not aligned.
9682 *
9683 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9684 * @param pu128Dst Where to return the dqword.
9685 * @param iSegReg The index of the segment register to use for
9686 * this access. The base and limits are checked.
9687 * @param GCPtrMem The address of the guest memory.
9688 */
9689DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9690{
9691 /* The lazy approach for now... */
9692 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9693 if ( (GCPtrMem & 15) == 0
9694 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9695 {
9696 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9697 pu128Dst->au64[0] = pu128Src->au64[0];
9698 pu128Dst->au64[1] = pu128Src->au64[1];
9699 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9700 return;
9701 }
9702
9703 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9704 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9705}
9706#endif
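
/*
 * Illustrative sketch (not part of IEM): the alignment rule applied by the
 * aligned SSE fetches/stores above.  A 16-byte SSE access normally raises
 * #GP(0) when the address is not 16-byte aligned; the check is waived when
 * MXCSR.MM (AMD misaligned-SSE mode, bit 17) is set.  Hypothetical helper:
 */
#if 0 /* sketch only, not built */
static bool sketchSseAlignmentOk(uint64_t GCPtrMem, uint32_t fMxcsr)
{
    bool const fAligned      = (GCPtrMem & 15) == 0;
    bool const fMisalignedOk = (fMxcsr & RT_BIT_32(17) /* X86_MXCSR_MM */) != 0;
    return fAligned || fMisalignedOk;
}
#endif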
9707
9708
9709/**
9710 * Fetches a data oword (octo word), generally AVX related.
9711 *
9712 * @returns Strict VBox status code.
9713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9714 * @param pu256Dst Where to return the oword.
9715 * @param iSegReg The index of the segment register to use for
9716 * this access. The base and limits are checked.
9717 * @param GCPtrMem The address of the guest memory.
9718 */
9719IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9720{
9721 /* The lazy approach for now... */
9722 PCRTUINT256U pu256Src;
9723 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9724 if (rc == VINF_SUCCESS)
9725 {
9726 pu256Dst->au64[0] = pu256Src->au64[0];
9727 pu256Dst->au64[1] = pu256Src->au64[1];
9728 pu256Dst->au64[2] = pu256Src->au64[2];
9729 pu256Dst->au64[3] = pu256Src->au64[3];
9730 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9731 }
9732 return rc;
9733}
9734
9735
9736#ifdef IEM_WITH_SETJMP
9737/**
9738 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9739 *
9740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9741 * @param pu256Dst Where to return the oword.
9742 * @param iSegReg The index of the segment register to use for
9743 * this access. The base and limits are checked.
9744 * @param GCPtrMem The address of the guest memory.
9745 */
9746IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9747{
9748 /* The lazy approach for now... */
9749 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9750 pu256Dst->au64[0] = pu256Src->au64[0];
9751 pu256Dst->au64[1] = pu256Src->au64[1];
9752 pu256Dst->au64[2] = pu256Src->au64[2];
9753 pu256Dst->au64[3] = pu256Src->au64[3];
9754 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9755}
9756#endif
9757
9758
9759/**
9760 * Fetches a data oword (octo word) at an aligned address, generally AVX
9761 * related.
9762 *
9763 * Raises \#GP(0) if not aligned.
9764 *
9765 * @returns Strict VBox status code.
9766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9767 * @param pu256Dst Where to return the oword.
9768 * @param iSegReg The index of the segment register to use for
9769 * this access. The base and limits are checked.
9770 * @param GCPtrMem The address of the guest memory.
9771 */
9772IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9773{
9774 /* The lazy approach for now... */
9775 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9776 if (GCPtrMem & 31)
9777 return iemRaiseGeneralProtectionFault0(pVCpu);
9778
9779 PCRTUINT256U pu256Src;
9780 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9781 if (rc == VINF_SUCCESS)
9782 {
9783 pu256Dst->au64[0] = pu256Src->au64[0];
9784 pu256Dst->au64[1] = pu256Src->au64[1];
9785 pu256Dst->au64[2] = pu256Src->au64[2];
9786 pu256Dst->au64[3] = pu256Src->au64[3];
9787 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9788 }
9789 return rc;
9790}
9791
9792
9793#ifdef IEM_WITH_SETJMP
9794/**
9795 * Fetches a data oword (octo word) at an aligned address, generally AVX
9796 * related, longjmp on error.
9797 *
9798 * Raises \#GP(0) if not aligned.
9799 *
9800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9801 * @param pu256Dst Where to return the oword.
9802 * @param iSegReg The index of the segment register to use for
9803 * this access. The base and limits are checked.
9804 * @param GCPtrMem The address of the guest memory.
9805 */
9806DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9807{
9808 /* The lazy approach for now... */
9809 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9810 if ((GCPtrMem & 31) == 0)
9811 {
9812 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9813 pu256Dst->au64[0] = pu256Src->au64[0];
9814 pu256Dst->au64[1] = pu256Src->au64[1];
9815 pu256Dst->au64[2] = pu256Src->au64[2];
9816 pu256Dst->au64[3] = pu256Src->au64[3];
9817 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9818 return;
9819 }
9820
9821 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9822 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9823}
9824#endif
9825
9826
9827
9828/**
9829 * Fetches a descriptor register (lgdt, lidt).
9830 *
9831 * @returns Strict VBox status code.
9832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9833 * @param pcbLimit Where to return the limit.
9834 * @param pGCPtrBase Where to return the base.
9835 * @param iSegReg The index of the segment register to use for
9836 * this access. The base and limits are checked.
9837 * @param GCPtrMem The address of the guest memory.
9838 * @param enmOpSize The effective operand size.
9839 */
9840IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9841 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9842{
9843 /*
9844 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9845 * little special:
9846 * - The two reads are done separately.
9847 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9848 * - We suspect the 386 to actually commit the limit before the base in
9849 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9850 * don't try to emulate this eccentric behavior, because it's not well
9851 * enough understood and rather hard to trigger.
9852 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9853 */
9854 VBOXSTRICTRC rcStrict;
9855 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9856 {
9857 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9858 if (rcStrict == VINF_SUCCESS)
9859 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9860 }
9861 else
9862 {
9863 uint32_t uTmp = 0; /* (silence the Visual C++ 'potentially uninitialized' warning) */
9864 if (enmOpSize == IEMMODE_32BIT)
9865 {
9866 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9867 {
9868 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9869 if (rcStrict == VINF_SUCCESS)
9870 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9871 }
9872 else
9873 {
9874 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9875 if (rcStrict == VINF_SUCCESS)
9876 {
9877 *pcbLimit = (uint16_t)uTmp;
9878 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9879 }
9880 }
9881 if (rcStrict == VINF_SUCCESS)
9882 *pGCPtrBase = uTmp;
9883 }
9884 else
9885 {
9886 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9887 if (rcStrict == VINF_SUCCESS)
9888 {
9889 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9890 if (rcStrict == VINF_SUCCESS)
9891 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9892 }
9893 }
9894 }
9895 return rcStrict;
9896}
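
/*
 * Illustrative sketch (not part of IEM): the memory operand layout read above.
 * LGDT/LIDT fetch a 16-bit limit followed by the base; only 24 bits of base
 * are used with a 16-bit operand size, 32 bits with a 32-bit operand size,
 * and 64 bits in long mode.  Hypothetical packed views:
 */
#if 0 /* sketch only, not built */
#pragma pack(1)
typedef struct SKETCHXDTR16 { uint16_t uLimit; uint8_t  abBase[3]; } SKETCHXDTR16; /* 16-bit opsize: 24-bit base */
typedef struct SKETCHXDTR32 { uint16_t uLimit; uint32_t uBase; }     SKETCHXDTR32; /* 32-bit opsize */
typedef struct SKETCHXDTR64 { uint16_t uLimit; uint64_t uBase; }     SKETCHXDTR64; /* 64-bit mode */
#pragma pack()
#endif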
9897
9898
9899
9900/**
9901 * Stores a data byte.
9902 *
9903 * @returns Strict VBox status code.
9904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9905 * @param iSegReg The index of the segment register to use for
9906 * this access. The base and limits are checked.
9907 * @param GCPtrMem The address of the guest memory.
9908 * @param u8Value The value to store.
9909 */
9910IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9911{
9912 /* The lazy approach for now... */
9913 uint8_t *pu8Dst;
9914 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9915 if (rc == VINF_SUCCESS)
9916 {
9917 *pu8Dst = u8Value;
9918 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9919 }
9920 return rc;
9921}
9922
9923
9924#ifdef IEM_WITH_SETJMP
9925/**
9926 * Stores a data byte, longjmp on error.
9927 *
9928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9929 * @param iSegReg The index of the segment register to use for
9930 * this access. The base and limits are checked.
9931 * @param GCPtrMem The address of the guest memory.
9932 * @param u8Value The value to store.
9933 */
9934IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9935{
9936 /* The lazy approach for now... */
9937 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9938 *pu8Dst = u8Value;
9939 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9940}
9941#endif
9942
9943
9944/**
9945 * Stores a data word.
9946 *
9947 * @returns Strict VBox status code.
9948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9949 * @param iSegReg The index of the segment register to use for
9950 * this access. The base and limits are checked.
9951 * @param GCPtrMem The address of the guest memory.
9952 * @param u16Value The value to store.
9953 */
9954IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9955{
9956 /* The lazy approach for now... */
9957 uint16_t *pu16Dst;
9958 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9959 if (rc == VINF_SUCCESS)
9960 {
9961 *pu16Dst = u16Value;
9962 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9963 }
9964 return rc;
9965}
9966
9967
9968#ifdef IEM_WITH_SETJMP
9969/**
9970 * Stores a data word, longjmp on error.
9971 *
9972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9973 * @param iSegReg The index of the segment register to use for
9974 * this access. The base and limits are checked.
9975 * @param GCPtrMem The address of the guest memory.
9976 * @param u16Value The value to store.
9977 */
9978IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9979{
9980 /* The lazy approach for now... */
9981 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9982 *pu16Dst = u16Value;
9983 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9984}
9985#endif
9986
9987
9988/**
9989 * Stores a data dword.
9990 *
9991 * @returns Strict VBox status code.
9992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9993 * @param iSegReg The index of the segment register to use for
9994 * this access. The base and limits are checked.
9995 * @param GCPtrMem The address of the guest memory.
9996 * @param u32Value The value to store.
9997 */
9998IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9999{
10000 /* The lazy approach for now... */
10001 uint32_t *pu32Dst;
10002 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10003 if (rc == VINF_SUCCESS)
10004 {
10005 *pu32Dst = u32Value;
10006 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10007 }
10008 return rc;
10009}
10010
10011
10012#ifdef IEM_WITH_SETJMP
10013/**
10014 * Stores a data dword, longjmp on error.
10015 *
10017 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10018 * @param iSegReg The index of the segment register to use for
10019 * this access. The base and limits are checked.
10020 * @param GCPtrMem The address of the guest memory.
10021 * @param u32Value The value to store.
10022 */
10023IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10024{
10025 /* The lazy approach for now... */
10026 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10027 *pu32Dst = u32Value;
10028 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10029}
10030#endif
10031
10032
10033/**
10034 * Stores a data qword.
10035 *
10036 * @returns Strict VBox status code.
10037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10038 * @param iSegReg The index of the segment register to use for
10039 * this access. The base and limits are checked.
10040 * @param GCPtrMem The address of the guest memory.
10041 * @param u64Value The value to store.
10042 */
10043IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10044{
10045 /* The lazy approach for now... */
10046 uint64_t *pu64Dst;
10047 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10048 if (rc == VINF_SUCCESS)
10049 {
10050 *pu64Dst = u64Value;
10051 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10052 }
10053 return rc;
10054}
10055
10056
10057#ifdef IEM_WITH_SETJMP
10058/**
10059 * Stores a data qword, longjmp on error.
10060 *
10061 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10062 * @param iSegReg The index of the segment register to use for
10063 * this access. The base and limits are checked.
10064 * @param GCPtrMem The address of the guest memory.
10065 * @param u64Value The value to store.
10066 */
10067IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10068{
10069 /* The lazy approach for now... */
10070 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10071 *pu64Dst = u64Value;
10072 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10073}
10074#endif
10075
10076
10077/**
10078 * Stores a data dqword.
10079 *
10080 * @returns Strict VBox status code.
10081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10082 * @param iSegReg The index of the segment register to use for
10083 * this access. The base and limits are checked.
10084 * @param GCPtrMem The address of the guest memory.
10085 * @param u128Value The value to store.
10086 */
10087IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10088{
10089 /* The lazy approach for now... */
10090 PRTUINT128U pu128Dst;
10091 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10092 if (rc == VINF_SUCCESS)
10093 {
10094 pu128Dst->au64[0] = u128Value.au64[0];
10095 pu128Dst->au64[1] = u128Value.au64[1];
10096 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10097 }
10098 return rc;
10099}
10100
10101
10102#ifdef IEM_WITH_SETJMP
10103/**
10104 * Stores a data dqword, longjmp on error.
10105 *
10106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10107 * @param iSegReg The index of the segment register to use for
10108 * this access. The base and limits are checked.
10109 * @param GCPtrMem The address of the guest memory.
10110 * @param u128Value The value to store.
10111 */
10112IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10113{
10114 /* The lazy approach for now... */
10115 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10116 pu128Dst->au64[0] = u128Value.au64[0];
10117 pu128Dst->au64[1] = u128Value.au64[1];
10118 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10119}
10120#endif
10121
10122
10123/**
10124 * Stores a data dqword, SSE aligned.
10125 *
10126 * @returns Strict VBox status code.
10127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10128 * @param iSegReg The index of the segment register to use for
10129 * this access. The base and limits are checked.
10130 * @param GCPtrMem The address of the guest memory.
10131 * @param u128Value The value to store.
10132 */
10133IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10134{
10135 /* The lazy approach for now... */
10136 if ( (GCPtrMem & 15)
10137 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10138 return iemRaiseGeneralProtectionFault0(pVCpu);
10139
10140 PRTUINT128U pu128Dst;
10141 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10142 if (rc == VINF_SUCCESS)
10143 {
10144 pu128Dst->au64[0] = u128Value.au64[0];
10145 pu128Dst->au64[1] = u128Value.au64[1];
10146 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10147 }
10148 return rc;
10149}
10150
10151
10152#ifdef IEM_WITH_SETJMP
10153/**
10154 * Stores a data dqword, SSE aligned, longjmp on error.
10155 *
10157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10158 * @param iSegReg The index of the segment register to use for
10159 * this access. The base and limits are checked.
10160 * @param GCPtrMem The address of the guest memory.
10161 * @param u128Value The value to store.
10162 */
10163DECL_NO_INLINE(IEM_STATIC, void)
10164iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10165{
10166 /* The lazy approach for now... */
10167 if ( (GCPtrMem & 15) == 0
10168 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10169 {
10170 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10171 pu128Dst->au64[0] = u128Value.au64[0];
10172 pu128Dst->au64[1] = u128Value.au64[1];
10173 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10174 return;
10175 }
10176
10177 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10178 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10179}
10180#endif
10181
10182
10183/**
10184 * Stores a descriptor register (sgdt, sidt).
10185 *
10186 * @returns Strict VBox status code.
10187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10188 * @param cbLimit The limit.
10189 * @param GCPtrBase The base address.
10190 * @param iSegReg The index of the segment register to use for
10191 * this access. The base and limits are checked.
10192 * @param GCPtrMem The address of the guest memory.
10193 */
10194IEM_STATIC VBOXSTRICTRC
10195iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10196{
10197 VBOXSTRICTRC rcStrict;
10198 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
10199 {
10200 Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n"));
10201 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
10202 }
10203
10204 /*
10205 * The SIDT and SGDT instructions actually store the data using two
10206 * independent writes. The instructions do not respond to opsize prefixes.
10207 */
10208 rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10209 if (rcStrict == VINF_SUCCESS)
10210 {
10211 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10212 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10213 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10214 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10215 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10216 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10217 else
10218 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10219 }
10220 return rcStrict;
10221}
10222
10223
10224/**
10225 * Pushes a word onto the stack.
10226 *
10227 * @returns Strict VBox status code.
10228 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10229 * @param u16Value The value to push.
10230 */
10231IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10232{
10233 /* Decrement the stack pointer. */
10234 uint64_t uNewRsp;
10235 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10236 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10237
10238 /* Write the word the lazy way. */
10239 uint16_t *pu16Dst;
10240 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10241 if (rc == VINF_SUCCESS)
10242 {
10243 *pu16Dst = u16Value;
10244 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10245 }
10246
10247 /* Commit the new RSP value unless an access handler made trouble. */
10248 if (rc == VINF_SUCCESS)
10249 pCtx->rsp = uNewRsp;
10250
10251 return rc;
10252}
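
/*
 * Worked example (illustrative, not a spec): with a flat 32-bit stack and
 * ESP=0x00001000, pushing a word computes the new top 0x00000FFE, writes the
 * value at SS:0x00000FFE, and only commits ESP=0x00000FFE once the (possibly
 * bounce buffered) write succeeded.  Hypothetical stand-alone sketch:
 */
#if 0 /* sketch only, not built */
static void sketchPushWord(uint32_t *pEsp, uint8_t *pbStack /* flat SS view */, uint16_t uValue)
{
    uint32_t const uNewEsp = *pEsp - (uint32_t)sizeof(uint16_t); /* compute, don't commit yet */
    memcpy(&pbStack[uNewEsp], &uValue, sizeof(uValue));          /* write at the new stack top */
    *pEsp = uNewEsp;                                             /* commit only after the write succeeded */
}
#endif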
10253
10254
10255/**
10256 * Pushes a dword onto the stack.
10257 *
10258 * @returns Strict VBox status code.
10259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10260 * @param u32Value The value to push.
10261 */
10262IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10263{
10264 /* Decrement the stack pointer. */
10265 uint64_t uNewRsp;
10266 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10267 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10268
10269 /* Write the dword the lazy way. */
10270 uint32_t *pu32Dst;
10271 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10272 if (rc == VINF_SUCCESS)
10273 {
10274 *pu32Dst = u32Value;
10275 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10276 }
10277
10278 /* Commit the new RSP value unless an access handler made trouble. */
10279 if (rc == VINF_SUCCESS)
10280 pCtx->rsp = uNewRsp;
10281
10282 return rc;
10283}
10284
10285
10286/**
10287 * Pushes a dword segment register value onto the stack.
10288 *
10289 * @returns Strict VBox status code.
10290 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10291 * @param u32Value The value to push.
10292 */
10293IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10294{
10295 /* Decrement the stack pointer. */
10296 uint64_t uNewRsp;
10297 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10298 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10299
10300 VBOXSTRICTRC rc;
10301 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10302 {
10303 /* The recompiler writes a full dword. */
10304 uint32_t *pu32Dst;
10305 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10306 if (rc == VINF_SUCCESS)
10307 {
10308 *pu32Dst = u32Value;
10309 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10310 }
10311 }
10312 else
10313 {
10314 /* The intel docs talk about zero extending the selector register
10315 value. My actual intel CPU here might be zero extending the value
10316 but it still only writes the lower word... */
10317 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10318 * happens when crossing an electric page boundary, is the high word checked
10319 * for write accessibility or not? Probably it is. What about segment limits?
10320 * It appears this behavior is also shared with trap error codes.
10321 *
10322 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10323 * ancient hardware when it actually did change. */
10324 uint16_t *pu16Dst;
10325 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10326 if (rc == VINF_SUCCESS)
10327 {
10328 *pu16Dst = (uint16_t)u32Value;
10329 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10330 }
10331 }
10332
10333 /* Commit the new RSP value unless an access handler made trouble. */
10334 if (rc == VINF_SUCCESS)
10335 pCtx->rsp = uNewRsp;
10336
10337 return rc;
10338}
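
/*
 * Illustrative example of the behaviour the comment above describes (CPU
 * dependent, so take it as a sketch rather than a spec): with ESP=0x00001000
 * and an existing dword 0xDEADBEEF at SS:0x00000FFC, a 32-bit "push ds" with
 * DS=0x0023 always ends with ESP=0x00000FFC, but a CPU that only writes the
 * selector word would leave the slot reading 0xDEAD0023 instead of 0x00000023.
 */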
10339
10340
10341/**
10342 * Pushes a qword onto the stack.
10343 *
10344 * @returns Strict VBox status code.
10345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10346 * @param u64Value The value to push.
10347 */
10348IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10349{
10350 /* Decrement the stack pointer. */
10351 uint64_t uNewRsp;
10352 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10353 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10354
10355 /* Write the qword the lazy way. */
10356 uint64_t *pu64Dst;
10357 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10358 if (rc == VINF_SUCCESS)
10359 {
10360 *pu64Dst = u64Value;
10361 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10362 }
10363
10364 /* Commit the new RSP value unless an access handler made trouble. */
10365 if (rc == VINF_SUCCESS)
10366 pCtx->rsp = uNewRsp;
10367
10368 return rc;
10369}
10370
10371
10372/**
10373 * Pops a word from the stack.
10374 *
10375 * @returns Strict VBox status code.
10376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10377 * @param pu16Value Where to store the popped value.
10378 */
10379IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10380{
10381 /* Increment the stack pointer. */
10382 uint64_t uNewRsp;
10383 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10384 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10385
10386 /* Fetch the word the lazy way. */
10387 uint16_t const *pu16Src;
10388 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10389 if (rc == VINF_SUCCESS)
10390 {
10391 *pu16Value = *pu16Src;
10392 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10393
10394 /* Commit the new RSP value. */
10395 if (rc == VINF_SUCCESS)
10396 pCtx->rsp = uNewRsp;
10397 }
10398
10399 return rc;
10400}
10401
10402
10403/**
10404 * Pops a dword from the stack.
10405 *
10406 * @returns Strict VBox status code.
10407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10408 * @param pu32Value Where to store the popped value.
10409 */
10410IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10411{
10412 /* Increment the stack pointer. */
10413 uint64_t uNewRsp;
10414 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10415 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10416
10417 /* Fetch the dword the lazy way. */
10418 uint32_t const *pu32Src;
10419 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10420 if (rc == VINF_SUCCESS)
10421 {
10422 *pu32Value = *pu32Src;
10423 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10424
10425 /* Commit the new RSP value. */
10426 if (rc == VINF_SUCCESS)
10427 pCtx->rsp = uNewRsp;
10428 }
10429
10430 return rc;
10431}
10432
10433
10434/**
10435 * Pops a qword from the stack.
10436 *
10437 * @returns Strict VBox status code.
10438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10439 * @param pu64Value Where to store the popped value.
10440 */
10441IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10442{
10443 /* Increment the stack pointer. */
10444 uint64_t uNewRsp;
10445 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10446 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10447
10448 /* Fetch the qword the lazy way. */
10449 uint64_t const *pu64Src;
10450 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10451 if (rc == VINF_SUCCESS)
10452 {
10453 *pu64Value = *pu64Src;
10454 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10455
10456 /* Commit the new RSP value. */
10457 if (rc == VINF_SUCCESS)
10458 pCtx->rsp = uNewRsp;
10459 }
10460
10461 return rc;
10462}
10463
10464
10465/**
10466 * Pushes a word onto the stack, using a temporary stack pointer.
10467 *
10468 * @returns Strict VBox status code.
10469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10470 * @param u16Value The value to push.
10471 * @param pTmpRsp Pointer to the temporary stack pointer.
10472 */
10473IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10474{
10475 /* Decrement the stack pointer. */
10476 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10477 RTUINT64U NewRsp = *pTmpRsp;
10478 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10479
10480 /* Write the word the lazy way. */
10481 uint16_t *pu16Dst;
10482 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10483 if (rc == VINF_SUCCESS)
10484 {
10485 *pu16Dst = u16Value;
10486 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10487 }
10488
10489 /* Commit the new RSP value unless an access handler made trouble. */
10490 if (rc == VINF_SUCCESS)
10491 *pTmpRsp = NewRsp;
10492
10493 return rc;
10494}
10495
10496
10497/**
10498 * Pushes a dword onto the stack, using a temporary stack pointer.
10499 *
10500 * @returns Strict VBox status code.
10501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10502 * @param u32Value The value to push.
10503 * @param pTmpRsp Pointer to the temporary stack pointer.
10504 */
10505IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10506{
10507 /* Decrement the stack pointer. */
10508 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10509 RTUINT64U NewRsp = *pTmpRsp;
10510 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10511
10512 /* Write the dword the lazy way. */
10513 uint32_t *pu32Dst;
10514 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10515 if (rc == VINF_SUCCESS)
10516 {
10517 *pu32Dst = u32Value;
10518 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10519 }
10520
10521 /* Commit the new RSP value unless an access handler made trouble. */
10522 if (rc == VINF_SUCCESS)
10523 *pTmpRsp = NewRsp;
10524
10525 return rc;
10526}
10527
10528
10529/**
10530 * Pushes a qword onto the stack, using a temporary stack pointer.
10531 *
10532 * @returns Strict VBox status code.
10533 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10534 * @param u64Value The value to push.
10535 * @param pTmpRsp Pointer to the temporary stack pointer.
10536 */
10537IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10538{
10539 /* Decrement the stack pointer. */
10540 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10541 RTUINT64U NewRsp = *pTmpRsp;
10542 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10543
10544 /* Write the qword the lazy way. */
10545 uint64_t *pu64Dst;
10546 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10547 if (rc == VINF_SUCCESS)
10548 {
10549 *pu64Dst = u64Value;
10550 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10551 }
10552
10553 /* Commit the new RSP value unless an access handler made trouble. */
10554 if (rc == VINF_SUCCESS)
10555 *pTmpRsp = NewRsp;
10556
10557 return rc;
10558}
10559
10560
10561/**
10562 * Pops a word from the stack, using a temporary stack pointer.
10563 *
10564 * @returns Strict VBox status code.
10565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10566 * @param pu16Value Where to store the popped value.
10567 * @param pTmpRsp Pointer to the temporary stack pointer.
10568 */
10569IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10570{
10571 /* Increment the stack pointer. */
10572 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10573 RTUINT64U NewRsp = *pTmpRsp;
10574 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10575
10576 /* Fetch the word the lazy way. */
10577 uint16_t const *pu16Src;
10578 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10579 if (rc == VINF_SUCCESS)
10580 {
10581 *pu16Value = *pu16Src;
10582 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10583
10584 /* Commit the new RSP value. */
10585 if (rc == VINF_SUCCESS)
10586 *pTmpRsp = NewRsp;
10587 }
10588
10589 return rc;
10590}
10591
10592
10593/**
10594 * Pops a dword from the stack, using a temporary stack pointer.
10595 *
10596 * @returns Strict VBox status code.
10597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10598 * @param pu32Value Where to store the popped value.
10599 * @param pTmpRsp Pointer to the temporary stack pointer.
10600 */
10601IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10602{
10603 /* Increment the stack pointer. */
10604 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10605 RTUINT64U NewRsp = *pTmpRsp;
10606 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10607
10608 /* Fetch the dword the lazy way. */
10609 uint32_t const *pu32Src;
10610 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10611 if (rc == VINF_SUCCESS)
10612 {
10613 *pu32Value = *pu32Src;
10614 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10615
10616 /* Commit the new RSP value. */
10617 if (rc == VINF_SUCCESS)
10618 *pTmpRsp = NewRsp;
10619 }
10620
10621 return rc;
10622}
10623
10624
10625/**
10626 * Pops a qword from the stack, using a temporary stack pointer.
10627 *
10628 * @returns Strict VBox status code.
10629 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10630 * @param pu64Value Where to store the popped value.
10631 * @param pTmpRsp Pointer to the temporary stack pointer.
10632 */
10633IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10634{
10635 /* Increment the stack pointer. */
10636 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10637 RTUINT64U NewRsp = *pTmpRsp;
10638 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10639
10640 /* Fetch the qword the lazy way. */
10641 uint64_t const *pu64Src;
10642 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10643 if (rcStrict == VINF_SUCCESS)
10644 {
10645 *pu64Value = *pu64Src;
10646 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10647
10648 /* Commit the new RSP value. */
10649 if (rcStrict == VINF_SUCCESS)
10650 *pTmpRsp = NewRsp;
10651 }
10652
10653 return rcStrict;
10654}
10655
10656
10657/**
10658 * Begin a special stack push (used by interrupt, exceptions and such).
10659 *
10660 * This will raise \#SS or \#PF if appropriate.
10661 *
10662 * @returns Strict VBox status code.
10663 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10664 * @param cbMem The number of bytes to push onto the stack.
10665 * @param ppvMem Where to return the pointer to the stack memory.
10666 * As with the other memory functions this could be
10667 * direct access or bounce buffered access, so
10668 * don't commit the register until the commit call
10669 * succeeds.
10670 * @param puNewRsp Where to return the new RSP value. This must be
10671 * passed unchanged to
10672 * iemMemStackPushCommitSpecial().
10673 */
10674IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10675{
10676 Assert(cbMem < UINT8_MAX);
10677 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10678 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10679 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10680}
10681
10682
10683/**
10684 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10685 *
10686 * This will update the rSP.
10687 *
10688 * @returns Strict VBox status code.
10689 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10690 * @param pvMem The pointer returned by
10691 * iemMemStackPushBeginSpecial().
10692 * @param uNewRsp The new RSP value returned by
10693 * iemMemStackPushBeginSpecial().
10694 */
10695IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10696{
10697 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10698 if (rcStrict == VINF_SUCCESS)
10699 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10700 return rcStrict;
10701}
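
/*
 * Usage sketch (hypothetical caller, not actual IEM code): how the special
 * push pair above is intended to be used when building, say, a real-mode
 * interrupt frame.  The three pushed words and their order are illustrative.
 */
#if 0 /* sketch only, not built */
static VBOXSTRICTRC sketchPushRealModeFrame(PVMCPU pVCpu, uint16_t uFlags, uint16_t uCs, uint16_t uIp)
{
    /* Reserve room for all three words in one go; RSP is not committed yet. */
    void        *pvFrame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Fill in the frame; the lowest address (new stack top) holds the IP. */
    uint16_t *pau16Frame = (uint16_t *)pvFrame;
    pau16Frame[0] = uIp;
    pau16Frame[1] = uCs;
    pau16Frame[2] = uFlags;

    /* Commit the memory and, on success, the new RSP. */
    return iemMemStackPushCommitSpecial(pVCpu, pvFrame, uNewRsp);
}
#endif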
10702
10703
10704/**
10705 * Begin a special stack pop (used by iret, retf and such).
10706 *
10707 * This will raise \#SS or \#PF if appropriate.
10708 *
10709 * @returns Strict VBox status code.
10710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10711 * @param cbMem The number of bytes to pop from the stack.
10712 * @param ppvMem Where to return the pointer to the stack memory.
10713 * @param puNewRsp Where to return the new RSP value. This must be
10714 * assigned to CPUMCTX::rsp manually some time
10715 * after iemMemStackPopDoneSpecial() has been
10716 * called.
10717 */
10718IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10719{
10720 Assert(cbMem < UINT8_MAX);
10721 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10722 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10723 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10724}
10725
10726
10727/**
10728 * Continue a special stack pop (used by iret and retf).
10729 *
10730 * This will raise \#SS or \#PF if appropriate.
10731 *
10732 * @returns Strict VBox status code.
10733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10734 * @param cbMem The number of bytes to pop from the stack.
10735 * @param ppvMem Where to return the pointer to the stack memory.
10736 * @param puNewRsp Where to return the new RSP value. This must be
10737 * assigned to CPUMCTX::rsp manually some time
10738 * after iemMemStackPopDoneSpecial() has been
10739 * called.
10740 */
10741IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10742{
10743 Assert(cbMem < UINT8_MAX);
10744 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10745 RTUINT64U NewRsp;
10746 NewRsp.u = *puNewRsp;
10747 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10748 *puNewRsp = NewRsp.u;
10749 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10750}
10751
10752
10753/**
10754 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10755 * iemMemStackPopContinueSpecial).
10756 *
10757 * The caller will manually commit the rSP.
10758 *
10759 * @returns Strict VBox status code.
10760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10761 * @param pvMem The pointer returned by
10762 * iemMemStackPopBeginSpecial() or
10763 * iemMemStackPopContinueSpecial().
10764 */
10765IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10766{
10767 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10768}
10769
10770
10771/**
10772 * Fetches a system table byte.
10773 *
10774 * @returns Strict VBox status code.
10775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10776 * @param pbDst Where to return the byte.
10777 * @param iSegReg The index of the segment register to use for
10778 * this access. The base and limits are checked.
10779 * @param GCPtrMem The address of the guest memory.
10780 */
10781IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10782{
10783 /* The lazy approach for now... */
10784 uint8_t const *pbSrc;
10785 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10786 if (rc == VINF_SUCCESS)
10787 {
10788 *pbDst = *pbSrc;
10789 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10790 }
10791 return rc;
10792}
10793
10794
10795/**
10796 * Fetches a system table word.
10797 *
10798 * @returns Strict VBox status code.
10799 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10800 * @param pu16Dst Where to return the word.
10801 * @param iSegReg The index of the segment register to use for
10802 * this access. The base and limits are checked.
10803 * @param GCPtrMem The address of the guest memory.
10804 */
10805IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10806{
10807 /* The lazy approach for now... */
10808 uint16_t const *pu16Src;
10809 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10810 if (rc == VINF_SUCCESS)
10811 {
10812 *pu16Dst = *pu16Src;
10813 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10814 }
10815 return rc;
10816}
10817
10818
10819/**
10820 * Fetches a system table dword.
10821 *
10822 * @returns Strict VBox status code.
10823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10824 * @param pu32Dst Where to return the dword.
10825 * @param iSegReg The index of the segment register to use for
10826 * this access. The base and limits are checked.
10827 * @param GCPtrMem The address of the guest memory.
10828 */
10829IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10830{
10831 /* The lazy approach for now... */
10832 uint32_t const *pu32Src;
10833 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10834 if (rc == VINF_SUCCESS)
10835 {
10836 *pu32Dst = *pu32Src;
10837 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10838 }
10839 return rc;
10840}
10841
10842
10843/**
10844 * Fetches a system table qword.
10845 *
10846 * @returns Strict VBox status code.
10847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10848 * @param pu64Dst Where to return the qword.
10849 * @param iSegReg The index of the segment register to use for
10850 * this access. The base and limits are checked.
10851 * @param GCPtrMem The address of the guest memory.
10852 */
10853IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10854{
10855 /* The lazy approach for now... */
10856 uint64_t const *pu64Src;
10857 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10858 if (rc == VINF_SUCCESS)
10859 {
10860 *pu64Dst = *pu64Src;
10861 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10862 }
10863 return rc;
10864}
10865
10866
10867/**
10868 * Fetches a descriptor table entry with caller specified error code.
10869 *
10870 * @returns Strict VBox status code.
10871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10872 * @param pDesc Where to return the descriptor table entry.
10873 * @param uSel The selector which table entry to fetch.
10874 * @param uXcpt The exception to raise on table lookup error.
10875 * @param uErrorCode The error code associated with the exception.
10876 */
10877IEM_STATIC VBOXSTRICTRC
10878iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10879{
10880 AssertPtr(pDesc);
10881 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10882
10883 /** @todo did the 286 require all 8 bytes to be accessible? */
10884 /*
10885 * Get the selector table base and check bounds.
10886 */
10887 RTGCPTR GCPtrBase;
10888 if (uSel & X86_SEL_LDT)
10889 {
10890 if ( !pCtx->ldtr.Attr.n.u1Present
10891 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10892 {
10893 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10894 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10895 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10896 uErrorCode, 0);
10897 }
10898
10899 Assert(pCtx->ldtr.Attr.n.u1Present);
10900 GCPtrBase = pCtx->ldtr.u64Base;
10901 }
10902 else
10903 {
10904 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10905 {
10906 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10907 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10908 uErrorCode, 0);
10909 }
10910 GCPtrBase = pCtx->gdtr.pGdt;
10911 }
10912
10913 /*
 10914 * Read the legacy descriptor and, if required, the long mode
 10915 * extensions.
10916 */
10917 VBOXSTRICTRC rcStrict;
10918 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10919 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10920 else
10921 {
10922 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10923 if (rcStrict == VINF_SUCCESS)
10924 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10925 if (rcStrict == VINF_SUCCESS)
10926 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10927 if (rcStrict == VINF_SUCCESS)
10928 pDesc->Legacy.au16[3] = 0;
10929 else
10930 return rcStrict;
10931 }
10932
10933 if (rcStrict == VINF_SUCCESS)
10934 {
10935 if ( !IEM_IS_LONG_MODE(pVCpu)
10936 || pDesc->Legacy.Gen.u1DescType)
10937 pDesc->Long.au64[1] = 0;
10938 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10939 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10940 else
10941 {
10942 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10943 /** @todo is this the right exception? */
10944 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10945 }
10946 }
10947 return rcStrict;
10948}
10949
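/*
 * Worked example for the bounds check above (selector value assumed for
 * illustration): with uSel = 0x0028, a GDT selector, (uSel | X86_SEL_RPL_LDT)
 * equals 0x002f, the offset of the last byte of the 8-byte descriptor.  The
 * entry is only accepted when that offset does not exceed gdtr.cbGdt, i.e.
 * when the whole descriptor lies inside the table.
 */
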
10950
10951/**
10952 * Fetches a descriptor table entry.
10953 *
10954 * @returns Strict VBox status code.
10955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10956 * @param pDesc Where to return the descriptor table entry.
10957 * @param uSel The selector which table entry to fetch.
10958 * @param uXcpt The exception to raise on table lookup error.
10959 */
10960IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10961{
10962 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10963}
10964
10965
10966/**
10967 * Fakes a long mode stack selector for SS = 0.
10968 *
10969 * @param pDescSs Where to return the fake stack descriptor.
10970 * @param uDpl The DPL we want.
10971 */
10972IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10973{
10974 pDescSs->Long.au64[0] = 0;
10975 pDescSs->Long.au64[1] = 0;
10976 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10977 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10978 pDescSs->Long.Gen.u2Dpl = uDpl;
10979 pDescSs->Long.Gen.u1Present = 1;
10980 pDescSs->Long.Gen.u1Long = 1;
10981}
10982
10983
10984/**
10985 * Marks the selector descriptor as accessed (only non-system descriptors).
10986 *
 10987 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10988 * will therefore skip the limit checks.
10989 *
10990 * @returns Strict VBox status code.
10991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10992 * @param uSel The selector.
10993 */
10994IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10995{
10996 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10997
10998 /*
10999 * Get the selector table base and calculate the entry address.
11000 */
11001 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11002 ? pCtx->ldtr.u64Base
11003 : pCtx->gdtr.pGdt;
11004 GCPtr += uSel & X86_SEL_MASK;
11005
11006 /*
 11007 * ASMAtomicBitSet will assert if the address is misaligned, so we do some
 11008 * ugly stuff to avoid that. This ensures the access is atomic and more or
 11009 * less removes any question about 8-bit vs. 32-bit accesses.
11010 */
11011 VBOXSTRICTRC rcStrict;
11012 uint32_t volatile *pu32;
11013 if ((GCPtr & 3) == 0)
11014 {
 11015 /* The normal case: map the 32 bits surrounding the accessed bit (bit 40). */
11016 GCPtr += 2 + 2;
11017 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11018 if (rcStrict != VINF_SUCCESS)
11019 return rcStrict;
 11020 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11021 }
11022 else
11023 {
11024 /* The misaligned GDT/LDT case, map the whole thing. */
11025 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11026 if (rcStrict != VINF_SUCCESS)
11027 return rcStrict;
11028 switch ((uintptr_t)pu32 & 3)
11029 {
11030 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11031 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11032 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11033 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11034 }
11035 }
11036
11037 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11038}
11039
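/*
 * Illustrative sketch (not part of the original sources): why bit 8 of the
 * dword at offset 4 is the accessed bit in the aligned path above.  The type
 * field lives in byte 5 of the 8-byte descriptor and X86_SEL_TYPE_ACCESSED is
 * bit 0 of it, i.e. bit 40 of the descriptor.
 *
 * @code
 *      unsigned const offTypeByte  = 5;                    // byte holding the type field
 *      unsigned const iBitAccessed = offTypeByte * 8 + 0;  // = 40 within the descriptor
 *      unsigned const iBitInDword  = iBitAccessed - 4 * 8; // = 8 within the dword at offset 4
 * @endcode
 */
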
11040/** @} */
11041
11042
11043/*
 11044 * Include the C/C++ implementations of the instructions.
11045 */
11046#include "IEMAllCImpl.cpp.h"
11047
11048
11049
11050/** @name "Microcode" macros.
11051 *
 11052 * The idea is that we should be able to use the same code both to interpret
 11053 * instructions and to feed a future recompiler. Thus this obfuscation.
11054 *
11055 * @{
11056 */
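/*
 * Illustrative sketch (hypothetical instruction body, not part of the
 * original sources): what a decoder function built from these microcode
 * macros typically looks like.  The macros used here are defined further
 * down in this block; the register-to-register move is made up for the
 * example.
 *
 * @code
 *      // Hypothetical 16-bit register-to-register move, AX -> CX.
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *      IEM_MC_STORE_GREG_U16(X86_GREG_xCX, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *      return VINF_SUCCESS;
 * @endcode
 */
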
11057#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11058#define IEM_MC_END() }
11059#define IEM_MC_PAUSE() do {} while (0)
11060#define IEM_MC_CONTINUE() do {} while (0)
11061
11062/** Internal macro. */
11063#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11064 do \
11065 { \
11066 VBOXSTRICTRC rcStrict2 = a_Expr; \
11067 if (rcStrict2 != VINF_SUCCESS) \
11068 return rcStrict2; \
11069 } while (0)
11070
11071
11072#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11073#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11074#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11075#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11076#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11077#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11078#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11079#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11080#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11081 do { \
11082 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11083 return iemRaiseDeviceNotAvailable(pVCpu); \
11084 } while (0)
11085#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11086 do { \
11087 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11088 return iemRaiseDeviceNotAvailable(pVCpu); \
11089 } while (0)
11090#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11091 do { \
11092 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11093 return iemRaiseMathFault(pVCpu); \
11094 } while (0)
11095#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11096 do { \
11097 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11098 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11099 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11100 return iemRaiseUndefinedOpcode(pVCpu); \
11101 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11102 return iemRaiseDeviceNotAvailable(pVCpu); \
11103 } while (0)
11104#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11105 do { \
11106 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11107 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11108 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11109 return iemRaiseUndefinedOpcode(pVCpu); \
11110 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11111 return iemRaiseDeviceNotAvailable(pVCpu); \
11112 } while (0)
11113#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11114 do { \
11115 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11116 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11117 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11118 return iemRaiseUndefinedOpcode(pVCpu); \
11119 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11120 return iemRaiseDeviceNotAvailable(pVCpu); \
11121 } while (0)
11122#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11123 do { \
11124 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11125 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11126 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11127 return iemRaiseUndefinedOpcode(pVCpu); \
11128 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11129 return iemRaiseDeviceNotAvailable(pVCpu); \
11130 } while (0)
11131#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11132 do { \
11133 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11134 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11135 return iemRaiseUndefinedOpcode(pVCpu); \
11136 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11137 return iemRaiseDeviceNotAvailable(pVCpu); \
11138 } while (0)
11139#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11140 do { \
11141 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11142 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11143 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11144 return iemRaiseUndefinedOpcode(pVCpu); \
11145 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11146 return iemRaiseDeviceNotAvailable(pVCpu); \
11147 } while (0)
11148#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11149 do { \
11150 if (pVCpu->iem.s.uCpl != 0) \
11151 return iemRaiseGeneralProtectionFault0(pVCpu); \
11152 } while (0)
11153#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11154 do { \
11155 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11156 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11157 } while (0)
11158
11159
11160#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11161#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11162#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11163#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11164#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11165#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11166#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11167 uint32_t a_Name; \
11168 uint32_t *a_pName = &a_Name
11169#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11170 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
11171
11172#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11173#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11174
11175#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11176#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11177#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11178#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11179#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11180#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11181#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11182#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11183#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11184#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11185#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11186#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11187#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11188#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11189#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11190#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11191#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11192#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11193#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11194#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11195#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11196#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11197#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11198#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11199#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11200#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11201#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11202#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11203#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11204/** @note Not for IOPL or IF testing or modification. */
11205#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11206#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11207#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11208#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11209
11210#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11211#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11212#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11213#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11214#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11215#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11216#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11217#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11218#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11219#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11220#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11221 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11222
11223#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11224#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11225/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11226 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11227#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11228#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11229/** @note Not for IOPL or IF testing or modification. */
11230#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11231
11232#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11233#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11234#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11235 do { \
11236 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11237 *pu32Reg += (a_u32Value); \
 11238 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
11239 } while (0)
11240#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11241
11242#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11243#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11244#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11245 do { \
11246 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11247 *pu32Reg -= (a_u32Value); \
 11248 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
11249 } while (0)
11250#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11251#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11252
11253#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11254#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11255#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11256#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11257#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11258#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11259#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11260
11261#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11262#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11263#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11264#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11265
11266#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11267#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11268#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11269
11270#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11271#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11272#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11273
11274#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11275#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11276#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11277
11278#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11279#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11280#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11281
11282#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11283
11284#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11285
11286#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11287#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11288#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11289 do { \
11290 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11291 *pu32Reg &= (a_u32Value); \
 11292 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
11293 } while (0)
11294#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11295
11296#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11297#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11298#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11299 do { \
11300 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11301 *pu32Reg |= (a_u32Value); \
 11302 pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
11303 } while (0)
11304#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11305
11306
11307/** @note Not for IOPL or IF modification. */
11308#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11309/** @note Not for IOPL or IF modification. */
11310#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11311/** @note Not for IOPL or IF modification. */
11312#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11313
11314#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11315
11316/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11317#define IEM_MC_FPU_TO_MMX_MODE() do { \
11318 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11319 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11320 } while (0)
11321
11322#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11323 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11324#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11325 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11326#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11327 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11328 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11329 } while (0)
11330#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11331 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11332 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11333 } while (0)
11334#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11335 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11336#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11337 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11338#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11339 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11340
11341#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11342 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11343 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11344 } while (0)
11345#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11346 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11347#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11348 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11349#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11350 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11351#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11352 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11353 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11354 } while (0)
11355#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11356 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11357#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11358 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11359 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11360 } while (0)
11361#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11362 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11363#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11364 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11365 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11366 } while (0)
11367#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11368 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11369#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11370 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11371#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11372 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11373#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11374 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11375#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11376 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11377 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11378 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11379 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11380 } while (0)
11381
11382#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11383#define IEM_MC_STORE_YREG_U128_ZX(a_iYRegDst, a_u128Src) \
11384 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11385 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11386 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11387 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11388 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11389 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11390 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
11391 } while (0)
11392#define IEM_MC_STORE_YREG_U256_ZX(a_iYRegDst, a_u256Src) \
11393 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11394 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11395 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11396 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11397 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11398 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11399 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
11400 } while (0)
11401#define IEM_MC_COPY_YREG_U256_ZX(a_iYRegDst, a_iYRegSrc) \
11402 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11403 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11404 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11405 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11406 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11407 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11408 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11409 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
11410 } while (0)
11411#define IEM_MC_COPY_YREG_U128_ZX(a_iYRegDst, a_iYRegSrc) \
11412 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11413 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11414 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11415 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11416 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11417 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11418 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11419 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
11420 } while (0)
11421
11422#ifndef IEM_WITH_SETJMP
11423# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11424 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11425# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11426 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11427# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11428 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11429#else
11430# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11431 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11432# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11433 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11434# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11435 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11436#endif
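/*
 * Note on the pattern above (repeated for the wider fetch/store macros that
 * follow): every memory access macro has two expansions.  Without
 * IEM_WITH_SETJMP it calls the status-code returning fetcher and bails out
 * via IEM_MC_RETURN_ON_FAILURE; with IEM_WITH_SETJMP it calls the *Jmp
 * variant, which reports failures by longjmp'ing out of the instruction.
 * Illustrative expansion of one use (concrete operands assumed):
 *
 * @code
 *      // IEM_MC_FETCH_MEM_U8(u8Value, X86_SREG_DS, GCPtrEff) becomes either
 *      IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Value, X86_SREG_DS, GCPtrEff)); // status code path
 *      // ... or
 *      u8Value = iemMemFetchDataU8Jmp(pVCpu, X86_SREG_DS, GCPtrEff);                        // longjmp path
 * @endcode
 */
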
11437
11438#ifndef IEM_WITH_SETJMP
11439# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11440 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11441# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11442 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11443# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11444 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11445#else
11446# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11447 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11448# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11449 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11450# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11451 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11452#endif
11453
11454#ifndef IEM_WITH_SETJMP
11455# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11456 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11457# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11458 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11459# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11460 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11461#else
11462# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11463 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11464# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11465 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11466# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11467 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11468#endif
11469
11470#ifdef SOME_UNUSED_FUNCTION
11471# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11472 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11473#endif
11474
11475#ifndef IEM_WITH_SETJMP
11476# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11477 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11478# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11479 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11480# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11481 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11482# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11483 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11484#else
11485# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11486 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11487# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11488 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11489# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11490 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11491# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11492 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11493#endif
11494
11495#ifndef IEM_WITH_SETJMP
11496# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11497 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11498# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11499 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11500# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11501 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11502#else
11503# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11504 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11505# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11506 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11507# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11508 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11509#endif
11510
11511#ifndef IEM_WITH_SETJMP
11512# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11513 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11514# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11515 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11516#else
11517# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11518 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11519# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11520 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11521#endif
11522
11523#ifndef IEM_WITH_SETJMP
11524# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11525 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11526# define IEM_MC_FETCH_MEM_U256_ALIGN_SSE(a_u256Dst, a_iSeg, a_GCPtrMem) \
11527 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11528#else
11529# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11530 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11531# define IEM_MC_FETCH_MEM_U256_ALIGN_SSE(a_u256Dst, a_iSeg, a_GCPtrMem) \
11532 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11533#endif
11534
11535
11536
11537#ifndef IEM_WITH_SETJMP
11538# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11539 do { \
11540 uint8_t u8Tmp; \
11541 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11542 (a_u16Dst) = u8Tmp; \
11543 } while (0)
11544# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11545 do { \
11546 uint8_t u8Tmp; \
11547 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11548 (a_u32Dst) = u8Tmp; \
11549 } while (0)
11550# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11551 do { \
11552 uint8_t u8Tmp; \
11553 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11554 (a_u64Dst) = u8Tmp; \
11555 } while (0)
11556# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11557 do { \
11558 uint16_t u16Tmp; \
11559 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11560 (a_u32Dst) = u16Tmp; \
11561 } while (0)
11562# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11563 do { \
11564 uint16_t u16Tmp; \
11565 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11566 (a_u64Dst) = u16Tmp; \
11567 } while (0)
11568# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11569 do { \
11570 uint32_t u32Tmp; \
11571 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11572 (a_u64Dst) = u32Tmp; \
11573 } while (0)
11574#else /* IEM_WITH_SETJMP */
11575# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11576 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11577# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11578 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11579# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11580 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11581# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11582 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11583# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11584 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11585# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11586 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11587#endif /* IEM_WITH_SETJMP */
11588
11589#ifndef IEM_WITH_SETJMP
11590# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11591 do { \
11592 uint8_t u8Tmp; \
11593 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11594 (a_u16Dst) = (int8_t)u8Tmp; \
11595 } while (0)
11596# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11597 do { \
11598 uint8_t u8Tmp; \
11599 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11600 (a_u32Dst) = (int8_t)u8Tmp; \
11601 } while (0)
11602# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11603 do { \
11604 uint8_t u8Tmp; \
11605 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11606 (a_u64Dst) = (int8_t)u8Tmp; \
11607 } while (0)
11608# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11609 do { \
11610 uint16_t u16Tmp; \
11611 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11612 (a_u32Dst) = (int16_t)u16Tmp; \
11613 } while (0)
11614# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11615 do { \
11616 uint16_t u16Tmp; \
11617 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11618 (a_u64Dst) = (int16_t)u16Tmp; \
11619 } while (0)
11620# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11621 do { \
11622 uint32_t u32Tmp; \
11623 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11624 (a_u64Dst) = (int32_t)u32Tmp; \
11625 } while (0)
11626#else /* IEM_WITH_SETJMP */
11627# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11628 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11629# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11630 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11631# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11632 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11633# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11634 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11635# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11636 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11637# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11638 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11639#endif /* IEM_WITH_SETJMP */
11640
11641#ifndef IEM_WITH_SETJMP
11642# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11643 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11644# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11645 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11646# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11647 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11648# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11649 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11650#else
11651# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11652 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11653# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11654 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11655# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11656 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11657# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11658 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11659#endif
11660
11661#ifndef IEM_WITH_SETJMP
11662# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11663 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11664# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11665 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11666# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11667 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11668# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11669 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11670#else
11671# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11672 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11673# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11674 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11675# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11676 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11677# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11678 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11679#endif
11680
11681#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11682#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11683#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11684#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11685#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11686#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11687#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11688 do { \
11689 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11690 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11691 } while (0)
11692
11693#ifndef IEM_WITH_SETJMP
11694# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11695 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11696# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11697 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11698#else
11699# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11700 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11701# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11702 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11703#endif
11704
11705
11706#define IEM_MC_PUSH_U16(a_u16Value) \
11707 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11708#define IEM_MC_PUSH_U32(a_u32Value) \
11709 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11710#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11711 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11712#define IEM_MC_PUSH_U64(a_u64Value) \
11713 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11714
11715#define IEM_MC_POP_U16(a_pu16Value) \
11716 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11717#define IEM_MC_POP_U32(a_pu32Value) \
11718 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11719#define IEM_MC_POP_U64(a_pu64Value) \
11720 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11721
11722/** Maps guest memory for direct or bounce buffered access.
11723 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11724 * @remarks May return.
11725 */
11726#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11727 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11728
11729/** Maps guest memory for direct or bounce buffered access.
11730 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11731 * @remarks May return.
11732 */
11733#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11734 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11735
11736/** Commits the memory and unmaps the guest memory.
11737 * @remarks May return.
11738 */
11739#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11740 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11741
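/*
 * Illustrative sketch (hypothetical read-modify-write operand, not part of
 * the original sources): the usual pairing of IEM_MC_MEM_MAP with
 * IEM_MC_MEM_COMMIT_AND_UNMAP inside an instruction body.  The local names
 * and the OR-ed constant are assumptions made for the example.
 *
 * @code
 *      uint16_t *pu16Dst;
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, X86_SREG_DS, GCPtrEff, 0); // last argument is the operand index (a_iArg)
 *      *pu16Dst |= UINT16_C(0x0001);                                          // operate on the mapped operand
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);              // write back / release
 * @endcode
 */
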
 11742/** Commits the memory and unmaps the guest memory, unless the FPU status word
 11743 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
 11744 * would cause FLD not to store.
11745 *
11746 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11747 * store, while \#P will not.
11748 *
11749 * @remarks May in theory return - for now.
11750 */
11751#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11752 do { \
11753 if ( !(a_u16FSW & X86_FSW_ES) \
11754 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11755 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11756 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11757 } while (0)
11758
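/*
 * Worked example for the condition above (values assumed): if a_u16FSW has
 * X86_FSW_ES and X86_FSW_UE set but the guest has masked underflow in the
 * control word (FCW.UM set), the term "& ~(... FCW & X86_FCW_MASK_ALL)"
 * clears X86_FSW_UE again and the store is committed.  Only an unmasked
 * #O, #U or #I together with FSW.ES set skips the commit.
 */
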
 11759/** Calculates the effective address from the ModR/M byte. */
11760#ifndef IEM_WITH_SETJMP
11761# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11762 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11763#else
11764# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11765 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11766#endif
11767
11768#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11769#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11770#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11771#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11772#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11773#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11774#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11775
11776/**
11777 * Defers the rest of the instruction emulation to a C implementation routine
11778 * and returns, only taking the standard parameters.
11779 *
11780 * @param a_pfnCImpl The pointer to the C routine.
11781 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11782 */
11783#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11784
11785/**
11786 * Defers the rest of instruction emulation to a C implementation routine and
11787 * returns, taking one argument in addition to the standard ones.
11788 *
11789 * @param a_pfnCImpl The pointer to the C routine.
11790 * @param a0 The argument.
11791 */
11792#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11793
11794/**
11795 * Defers the rest of the instruction emulation to a C implementation routine
11796 * and returns, taking two arguments in addition to the standard ones.
11797 *
11798 * @param a_pfnCImpl The pointer to the C routine.
11799 * @param a0 The first extra argument.
11800 * @param a1 The second extra argument.
11801 */
11802#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11803
11804/**
11805 * Defers the rest of the instruction emulation to a C implementation routine
11806 * and returns, taking three arguments in addition to the standard ones.
11807 *
11808 * @param a_pfnCImpl The pointer to the C routine.
11809 * @param a0 The first extra argument.
11810 * @param a1 The second extra argument.
11811 * @param a2 The third extra argument.
11812 */
11813#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11814
11815/**
11816 * Defers the rest of the instruction emulation to a C implementation routine
11817 * and returns, taking four arguments in addition to the standard ones.
11818 *
11819 * @param a_pfnCImpl The pointer to the C routine.
11820 * @param a0 The first extra argument.
11821 * @param a1 The second extra argument.
11822 * @param a2 The third extra argument.
11823 * @param a3 The fourth extra argument.
11824 */
11825#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11826
11827/**
11828 * Defers the rest of the instruction emulation to a C implementation routine
 11829 * and returns, taking five arguments in addition to the standard ones.
11830 *
11831 * @param a_pfnCImpl The pointer to the C routine.
11832 * @param a0 The first extra argument.
11833 * @param a1 The second extra argument.
11834 * @param a2 The third extra argument.
11835 * @param a3 The fourth extra argument.
11836 * @param a4 The fifth extra argument.
11837 */
11838#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11839
11840/**
11841 * Defers the entire instruction emulation to a C implementation routine and
11842 * returns, only taking the standard parameters.
11843 *
 11844 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11845 *
11846 * @param a_pfnCImpl The pointer to the C routine.
11847 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11848 */
11849#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11850
11851/**
11852 * Defers the entire instruction emulation to a C implementation routine and
11853 * returns, taking one argument in addition to the standard ones.
11854 *
 11855 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11856 *
11857 * @param a_pfnCImpl The pointer to the C routine.
11858 * @param a0 The argument.
11859 */
11860#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11861
11862/**
11863 * Defers the entire instruction emulation to a C implementation routine and
11864 * returns, taking two arguments in addition to the standard ones.
11865 *
 11866 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11867 *
11868 * @param a_pfnCImpl The pointer to the C routine.
11869 * @param a0 The first extra argument.
11870 * @param a1 The second extra argument.
11871 */
11872#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11873
11874/**
11875 * Defers the entire instruction emulation to a C implementation routine and
11876 * returns, taking three arguments in addition to the standard ones.
11877 *
 11878 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11879 *
11880 * @param a_pfnCImpl The pointer to the C routine.
11881 * @param a0 The first extra argument.
11882 * @param a1 The second extra argument.
11883 * @param a2 The third extra argument.
11884 */
11885#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11886
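/*
 * Illustrative sketch (hypothetical decoder stub, not part of the original
 * sources): IEM_MC_DEFER_TO_CIMPL_0 is used on its own, without any
 * IEM_MC_BEGIN/IEM_MC_END pair, to hand the whole instruction over to a C
 * worker.  The worker name below is made up for the example.
 *
 * @code
 *      // FNIEMOP-style body that defers everything to a C implementation.
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_exampleWorker);
 * @endcode
 */
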
11887/**
11888 * Calls a FPU assembly implementation taking one visible argument.
11889 *
11890 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11891 * @param a0 The first extra argument.
11892 */
11893#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11894 do { \
11895 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11896 } while (0)
11897
11898/**
11899 * Calls a FPU assembly implementation taking two visible arguments.
11900 *
11901 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11902 * @param a0 The first extra argument.
11903 * @param a1 The second extra argument.
11904 */
11905#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11906 do { \
11907 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11908 } while (0)
11909
11910/**
11911 * Calls a FPU assembly implementation taking three visible arguments.
11912 *
11913 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11914 * @param a0 The first extra argument.
11915 * @param a1 The second extra argument.
11916 * @param a2 The third extra argument.
11917 */
11918#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11919 do { \
11920 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11921 } while (0)
11922
11923#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11924 do { \
11925 (a_FpuData).FSW = (a_FSW); \
11926 (a_FpuData).r80Result = *(a_pr80Value); \
11927 } while (0)
11928
11929/** Pushes FPU result onto the stack. */
11930#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11931 iemFpuPushResult(pVCpu, &a_FpuData)
11932/** Pushes FPU result onto the stack and sets the FPUDP. */
11933#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11934 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11935
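/*
 * Illustrative sketch (hypothetical FPU instruction tail, not part of the
 * original sources): how the FPU AIMPL call macros and the result macros
 * above are typically combined.  The worker name and its argument shape are
 * assumptions made for the example.
 *
 * @code
 *      IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
 *      IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_exampleWorker_r80, &FpuRes, pr80Value);
 *      IEM_MC_PUSH_FPU_RESULT(FpuRes);
 * @endcode
 */
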
 11936/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
11937#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11938 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11939
11940/** Stores FPU result in a stack register. */
11941#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11942 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11943/** Stores FPU result in a stack register and pops the stack. */
11944#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11945 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11946/** Stores FPU result in a stack register and sets the FPUDP. */
11947#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11948 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11949/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11950 * stack. */
11951#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11952 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11953
11954/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11955#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11956 iemFpuUpdateOpcodeAndIp(pVCpu)
11957/** Free a stack register (for FFREE and FFREEP). */
11958#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11959 iemFpuStackFree(pVCpu, a_iStReg)
11960/** Increment the FPU stack pointer. */
11961#define IEM_MC_FPU_STACK_INC_TOP() \
11962 iemFpuStackIncTop(pVCpu)
11963/** Decrement the FPU stack pointer. */
11964#define IEM_MC_FPU_STACK_DEC_TOP() \
11965 iemFpuStackDecTop(pVCpu)
11966
11967/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11968#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11969 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11970/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11971#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11972 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11973/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11974#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11975 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11976/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11977#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11978 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11979/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11980 * stack. */
11981#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11982 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11983/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11984#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11985 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11986
11987/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11988#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11989 iemFpuStackUnderflow(pVCpu, a_iStDst)
11990/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11991 * stack. */
11992#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11993 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11994/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11995 * FPUDS. */
11996#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11997 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11998/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11999 * FPUDS. Pops stack. */
12000#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12001 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12002/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12003 * stack twice. */
12004#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12005 iemFpuStackUnderflowThenPopPop(pVCpu)
12006/** Raises a FPU stack underflow exception for an instruction pushing a result
12007 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12008#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12009 iemFpuStackPushUnderflow(pVCpu)
12010/** Raises a FPU stack underflow exception for an instruction pushing a result
12011 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12012#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12013 iemFpuStackPushUnderflowTwo(pVCpu)
12014
12015/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12016 * FPUIP, FPUCS and FOP. */
12017#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12018 iemFpuStackPushOverflow(pVCpu)
12019/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12020 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12021#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12022 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12023/** Prepares for using the FPU state.
12024 * Ensures that we can use the host FPU in the current context (RC+R0).
12025 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12026#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12027/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12028#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12029/** Actualizes the guest FPU state so it can be accessed and modified. */
12030#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12031
12032/** Prepares for using the SSE state.
12033 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12034 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12035#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12036/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12037#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12038/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12039#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12040
12041/** Prepares for using the AVX state.
12042 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12043 * Ensures the guest AVX state in the CPUMCTX is up to date.
12044 * @note This will include the AVX512 state too when support for it is added
12045 * due to the zero-extending feature of VEX-encoded instructions. */
12046#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12047/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12048#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12049/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12050#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12051
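/* Illustrative ordering note: one of the prepare/actualize macros above must be
 * issued before any MC macro that reads or writes the x87/SSE/AVX state, e.g.
 * (sketch only; the register fetch macro is defined elsewhere in IEM):
 *     IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ();
 *     IEM_MC_FETCH_XREG_U128(uSrc, iXRegSrc);
 */
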
12052/**
12053 * Calls a MMX assembly implementation taking two visible arguments.
12054 *
12055 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12056 * @param a0 The first extra argument.
12057 * @param a1 The second extra argument.
12058 */
12059#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12060 do { \
12061 IEM_MC_PREPARE_FPU_USAGE(); \
12062 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12063 } while (0)
12064
12065/**
12066 * Calls a MMX assembly implementation taking three visible arguments.
12067 *
12068 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12069 * @param a0 The first extra argument.
12070 * @param a1 The second extra argument.
12071 * @param a2 The third extra argument.
12072 */
12073#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12074 do { \
12075 IEM_MC_PREPARE_FPU_USAGE(); \
12076 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12077 } while (0)
12078
12079
12080/**
12081 * Calls a SSE assembly implementation taking two visible arguments.
12082 *
12083 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12084 * @param a0 The first extra argument.
12085 * @param a1 The second extra argument.
12086 */
12087#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12088 do { \
12089 IEM_MC_PREPARE_SSE_USAGE(); \
12090 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12091 } while (0)
12092
12093/**
12094 * Calls a SSE assembly implementation taking three visible arguments.
12095 *
12096 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12097 * @param a0 The first extra argument.
12098 * @param a1 The second extra argument.
12099 * @param a2 The third extra argument.
12100 */
12101#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12102 do { \
12103 IEM_MC_PREPARE_SSE_USAGE(); \
12104 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12105 } while (0)
12106
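/* Illustrative sketch of a register-only SSE instruction body (the worker and
 * register-reference macro names are examples; note from the definitions above
 * that the MMX/SSE call macros invoke the prepare macro once more themselves):
 *     IEM_MC_BEGIN(2, 0);
 *     IEM_MC_ARG(PRTUINT128U, pDst, 0);
 *     IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
 *     IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *     IEM_MC_PREPARE_SSE_USAGE();
 *     IEM_MC_REF_XREG_U128(pDst, iXRegDst);
 *     IEM_MC_REF_XREG_U128_CONST(pSrc, iXRegSrc);
 *     IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, pDst, pSrc);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */
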
12107/** @note Not for IOPL or IF testing. */
12108#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
12109/** @note Not for IOPL or IF testing. */
12110#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
12111/** @note Not for IOPL or IF testing. */
12112#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
12113/** @note Not for IOPL or IF testing. */
12114#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
12115/** @note Not for IOPL or IF testing. */
12116#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12117 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12118 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12119/** @note Not for IOPL or IF testing. */
12120#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12121 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12122 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12123/** @note Not for IOPL or IF testing. */
12124#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12125 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12126 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12127 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12128/** @note Not for IOPL or IF testing. */
12129#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12130 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12131 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12132 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12133#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
12134#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
12135#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
12136/** @note Not for IOPL or IF testing. */
12137#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12138 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12139 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12140/** @note Not for IOPL or IF testing. */
12141#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12142 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12143 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12144/** @note Not for IOPL or IF testing. */
12145#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12146 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12147 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12148/** @note Not for IOPL or IF testing. */
12149#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12150 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12151 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12152/** @note Not for IOPL or IF testing. */
12153#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12154 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12155 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12156/** @note Not for IOPL or IF testing. */
12157#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12158 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12159 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12160#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12161#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12162
12163#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12164 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12165#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12166 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12167#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12168 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12169#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12170 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12171#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12172 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12173#define IEM_MC_IF_FCW_IM() \
12174 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12175
12176#define IEM_MC_ELSE() } else {
12177#define IEM_MC_ENDIF() } do {} while (0)
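
/* Illustrative sketch of the IF/ELSE/ENDIF idiom for a conditional branch body
 * (the relative-jump and immediate-fetch macros are defined elsewhere in IEM,
 * and the i8Imm local is assumed to have been fetched during decoding):
 *     IEM_MC_BEGIN(0, 0);
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     IEM_MC_ELSE()
 *         IEM_MC_ADVANCE_RIP();
 *     IEM_MC_ENDIF();
 *     IEM_MC_END();
 */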
12178
12179/** @} */
12180
12181
12182/** @name Opcode Debug Helpers.
12183 * @{
12184 */
12185#ifdef VBOX_WITH_STATISTICS
12186# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12187#else
12188# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12189#endif
12190
12191#ifdef DEBUG
12192# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12193 do { \
12194 IEMOP_INC_STATS(a_Stats); \
12195 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
12196 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12197 } while (0)
12198
12199# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12200 do { \
12201 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12202 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12203 (void)RT_CONCAT(OP_,a_Upper); \
12204 (void)(a_fDisHints); \
12205 (void)(a_fIemHints); \
12206 } while (0)
12207
12208# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12209 do { \
12210 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12211 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12212 (void)RT_CONCAT(OP_,a_Upper); \
12213 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12214 (void)(a_fDisHints); \
12215 (void)(a_fIemHints); \
12216 } while (0)
12217
12218# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12219 do { \
12220 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12221 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12222 (void)RT_CONCAT(OP_,a_Upper); \
12223 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12224 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12225 (void)(a_fDisHints); \
12226 (void)(a_fIemHints); \
12227 } while (0)
12228
12229# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12230 do { \
12231 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12232 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12233 (void)RT_CONCAT(OP_,a_Upper); \
12234 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12235 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12236 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12237 (void)(a_fDisHints); \
12238 (void)(a_fIemHints); \
12239 } while (0)
12240
12241# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12242 do { \
12243 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12244 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12245 (void)RT_CONCAT(OP_,a_Upper); \
12246 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12247 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12248 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12249 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12250 (void)(a_fDisHints); \
12251 (void)(a_fIemHints); \
12252 } while (0)
12253
12254#else
12255# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12256
12257# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12258 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12259# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12260 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12261# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12262 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12263# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12264 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12265# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12266 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12267
12268#endif
12269
12270#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12271 IEMOP_MNEMONIC0EX(a_Lower, \
12272 #a_Lower, \
12273 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12274#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12275 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12276 #a_Lower " " #a_Op1, \
12277 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12278#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12279 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12280 #a_Lower " " #a_Op1 "," #a_Op2, \
12281 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12282#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12283 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12284 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12285 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12286#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12287 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12288 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12289 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
12290
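/* Illustrative sketch: a typical opcode decoder opens with one of the mnemonic
 * macros above, e.g. (form/operand/hint constants as used by the instruction
 * tables included near the end of this file):
 *     FNIEMOP_DEF(iemOp_xor_Gv_Ev)
 *     {
 *         IEMOP_MNEMONIC2(RM, XOR, xor, Gv, Ev, DISOPTYPE_HARMLESS, 0);
 *         ...
 *     }
 */
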
12291/** @} */
12292
12293
12294/** @name Opcode Helpers.
12295 * @{
12296 */
12297
12298#ifdef IN_RING3
12299# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12300 do { \
12301 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12302 else \
12303 { \
12304 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12305 return IEMOP_RAISE_INVALID_OPCODE(); \
12306 } \
12307 } while (0)
12308#else
12309# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12310 do { \
12311 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12312 else return IEMOP_RAISE_INVALID_OPCODE(); \
12313 } while (0)
12314#endif
12315
12316/** The instruction requires a 186 or later. */
12317#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12318# define IEMOP_HLP_MIN_186() do { } while (0)
12319#else
12320# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12321#endif
12322
12323/** The instruction requires a 286 or later. */
12324#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12325# define IEMOP_HLP_MIN_286() do { } while (0)
12326#else
12327# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12328#endif
12329
12330/** The instruction requires a 386 or later. */
12331#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12332# define IEMOP_HLP_MIN_386() do { } while (0)
12333#else
12334# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12335#endif
12336
12337/** The instruction requires a 386 or later if the given expression is true. */
12338#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12339# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12340#else
12341# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12342#endif
12343
12344/** The instruction requires a 486 or later. */
12345#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12346# define IEMOP_HLP_MIN_486() do { } while (0)
12347#else
12348# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12349#endif
12350
12351/** The instruction requires a Pentium (586) or later. */
12352#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12353# define IEMOP_HLP_MIN_586() do { } while (0)
12354#else
12355# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12356#endif
12357
12358/** The instruction requires a PentiumPro (686) or later. */
12359#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12360# define IEMOP_HLP_MIN_686() do { } while (0)
12361#else
12362# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12363#endif
12364
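/* Illustrative sketch: an instruction introduced with a later CPU generation
 * gates itself before doing any further decoding, e.g. for a 386+ instruction:
 *     FNIEMOP_DEF(iemOp_movzx_Gv_Eb)
 *     {
 *         IEMOP_MNEMONIC(movzx_Gv_Eb, "movzx Gv,Eb");
 *         IEMOP_HLP_MIN_386();
 *         ...
 *     }
 */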
12365
12366/** The instruction raises an \#UD in real and V8086 mode. */
12367#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12368 do \
12369 { \
12370 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12371 else return IEMOP_RAISE_INVALID_OPCODE(); \
12372 } while (0)
12373
12374/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12375 * 64-bit mode. */
12376#define IEMOP_HLP_NO_64BIT() \
12377 do \
12378 { \
12379 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12380 return IEMOP_RAISE_INVALID_OPCODE(); \
12381 } while (0)
12382
12383/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12384 * 64-bit mode. */
12385#define IEMOP_HLP_ONLY_64BIT() \
12386 do \
12387 { \
12388 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12389 return IEMOP_RAISE_INVALID_OPCODE(); \
12390 } while (0)
12391
12392/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12393#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12394 do \
12395 { \
12396 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12397 iemRecalEffOpSize64Default(pVCpu); \
12398 } while (0)
12399
12400/** The instruction has 64-bit operand size if 64-bit mode. */
12401#define IEMOP_HLP_64BIT_OP_SIZE() \
12402 do \
12403 { \
12404 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12405 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12406 } while (0)
12407
12408/** Only a REX prefix immediately preceding the first opcode byte takes
12409 * effect. This macro helps ensure this, and logs when bad guest code violates it. */
12410#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12411 do \
12412 { \
12413 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12414 { \
12415 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12416 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12417 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12418 pVCpu->iem.s.uRexB = 0; \
12419 pVCpu->iem.s.uRexIndex = 0; \
12420 pVCpu->iem.s.uRexReg = 0; \
12421 iemRecalEffOpSize(pVCpu); \
12422 } \
12423 } while (0)
12424
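/* Illustrative sketch: the legacy prefix decoders invoke the macro above with a
 * short tag for the log message, e.g. a segment override handler would do:
 *     IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg es");
 */
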
12425/**
12426 * Done decoding.
12427 */
12428#define IEMOP_HLP_DONE_DECODING() \
12429 do \
12430 { \
12431 /*nothing for now, maybe later... */ \
12432 } while (0)
12433
12434/**
12435 * Done decoding, raise \#UD exception if lock prefix present.
12436 */
12437#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12438 do \
12439 { \
12440 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12441 { /* likely */ } \
12442 else \
12443 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12444 } while (0)
12445
12446
12447/**
12448 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12449 * repnz or size prefixes are present, or if in real or v8086 mode.
12450 */
12451#define IEMOP_HLP_DONE_DECODING_NO_AVX_PREFIX() \
12452 do \
12453 { \
12454 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12455 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12456 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12457 { /* likely */ } \
12458 else \
12459 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12460 } while (0)
12461
12462
12463/**
12464 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12465 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12466 * register 0, or if in real or v8086 mode.
12467 */
12468#define IEMOP_HLP_DONE_DECODING_NO_AVX_PREFIX_AND_NO_VVVV() \
12469 do \
12470 { \
12471 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12472 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12473 && !pVCpu->iem.s.uVex3rdReg \
12474 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12475 { /* likely */ } \
12476 else \
12477 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12478 } while (0)
12479
12480#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12481 do \
12482 { \
12483 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12484 { /* likely */ } \
12485 else \
12486 { \
12487 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12488 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12489 } \
12490 } while (0)
12491#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12492 do \
12493 { \
12494 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12495 { /* likely */ } \
12496 else \
12497 { \
12498 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12499 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12500 } \
12501 } while (0)
12502
12503/**
12504 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12505 * are present.
12506 */
12507#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12508 do \
12509 { \
12510 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12511 { /* likely */ } \
12512 else \
12513 return IEMOP_RAISE_INVALID_OPCODE(); \
12514 } while (0)
12515
12516
12517/**
12518 * Done decoding VEX.
12519 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, or if
12520 * we're in real or v8086 mode.
12521 */
12522#define IEMOP_HLP_DONE_VEX_DECODING() \
12523 do \
12524 { \
12525 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12526 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12527 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12528 { /* likely */ } \
12529 else \
12530 return IEMOP_RAISE_INVALID_OPCODE(); \
12531 } while (0)
12532
12533/**
12534 * Done decoding VEX, no V, no L.
12535 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12536 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12537 */
12538#define IEMOP_HLP_DONE_VEX_DECODING_L_ZERO_NO_VVV() \
12539 do \
12540 { \
12541 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12542 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12543 && pVCpu->iem.s.uVexLength == 0 \
12544 && pVCpu->iem.s.uVex3rdReg == 0 \
12545 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12546 { /* likely */ } \
12547 else \
12548 return IEMOP_RAISE_INVALID_OPCODE(); \
12549 } while (0)
12550
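/* Illustrative sketch: a VEX-encoded, register-form instruction typically calls
 * one of the VEX decoding-done macros right after parsing the ModR/M byte:
 *     uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *     if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
 *     {
 *         IEMOP_HLP_DONE_VEX_DECODING();
 *         ...
 *     }
 */
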
12551#ifdef VBOX_WITH_NESTED_HWVIRT
12552/** Checks and handles SVM nested-guest control & instruction intercepts. */
12553# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12554 do \
12555 { \
12556 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12557 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12558 } while (0)
12559
12560/** Checks and handles the SVM nested-guest CR read intercept for the given CR number. */
12561# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12562 do \
12563 { \
12564 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12565 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12566 } while (0)
12567
12568#else
12569# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12570# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12571
12572#endif /* VBOX_WITH_NESTED_HWVIRT */
12573
12574
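/* Worked example (illustrative): with 32-bit addressing, bRm=0x44 decodes as
 * mod=1 (a disp8 follows the SIB byte) and rm=4 (SIB present).  A SIB byte of
 * 0x98 then gives scale=4, index=EBX and base=EAX, so for disp8=0x10 the code
 * below returns EAX + EBX*4 + 0x10 as the effective address.
 */
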
12575/**
12576 * Calculates the effective address of a ModR/M memory operand.
12577 *
12578 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12579 *
12580 * @return Strict VBox status code.
12581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12582 * @param bRm The ModRM byte.
12583 * @param cbImm The size of any immediate following the
12584 * effective address opcode bytes. Important for
12585 * RIP relative addressing.
12586 * @param pGCPtrEff Where to return the effective address.
12587 */
12588IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12589{
12590 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12591 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12592# define SET_SS_DEF() \
12593 do \
12594 { \
12595 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12596 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12597 } while (0)
12598
12599 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12600 {
12601/** @todo Check the effective address size crap! */
12602 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12603 {
12604 uint16_t u16EffAddr;
12605
12606 /* Handle the disp16 form with no registers first. */
12607 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12608 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12609 else
12610 {
12611 /* Get the displacement. */
12612 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12613 {
12614 case 0: u16EffAddr = 0; break;
12615 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12616 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12617 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12618 }
12619
12620 /* Add the base and index registers to the disp. */
12621 switch (bRm & X86_MODRM_RM_MASK)
12622 {
12623 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12624 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12625 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12626 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12627 case 4: u16EffAddr += pCtx->si; break;
12628 case 5: u16EffAddr += pCtx->di; break;
12629 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12630 case 7: u16EffAddr += pCtx->bx; break;
12631 }
12632 }
12633
12634 *pGCPtrEff = u16EffAddr;
12635 }
12636 else
12637 {
12638 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12639 uint32_t u32EffAddr;
12640
12641 /* Handle the disp32 form with no registers first. */
12642 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12643 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12644 else
12645 {
12646 /* Get the register (or SIB) value. */
12647 switch ((bRm & X86_MODRM_RM_MASK))
12648 {
12649 case 0: u32EffAddr = pCtx->eax; break;
12650 case 1: u32EffAddr = pCtx->ecx; break;
12651 case 2: u32EffAddr = pCtx->edx; break;
12652 case 3: u32EffAddr = pCtx->ebx; break;
12653 case 4: /* SIB */
12654 {
12655 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12656
12657 /* Get the index and scale it. */
12658 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12659 {
12660 case 0: u32EffAddr = pCtx->eax; break;
12661 case 1: u32EffAddr = pCtx->ecx; break;
12662 case 2: u32EffAddr = pCtx->edx; break;
12663 case 3: u32EffAddr = pCtx->ebx; break;
12664 case 4: u32EffAddr = 0; /*none */ break;
12665 case 5: u32EffAddr = pCtx->ebp; break;
12666 case 6: u32EffAddr = pCtx->esi; break;
12667 case 7: u32EffAddr = pCtx->edi; break;
12668 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12669 }
12670 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12671
12672 /* add base */
12673 switch (bSib & X86_SIB_BASE_MASK)
12674 {
12675 case 0: u32EffAddr += pCtx->eax; break;
12676 case 1: u32EffAddr += pCtx->ecx; break;
12677 case 2: u32EffAddr += pCtx->edx; break;
12678 case 3: u32EffAddr += pCtx->ebx; break;
12679 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12680 case 5:
12681 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12682 {
12683 u32EffAddr += pCtx->ebp;
12684 SET_SS_DEF();
12685 }
12686 else
12687 {
12688 uint32_t u32Disp;
12689 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12690 u32EffAddr += u32Disp;
12691 }
12692 break;
12693 case 6: u32EffAddr += pCtx->esi; break;
12694 case 7: u32EffAddr += pCtx->edi; break;
12695 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12696 }
12697 break;
12698 }
12699 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12700 case 6: u32EffAddr = pCtx->esi; break;
12701 case 7: u32EffAddr = pCtx->edi; break;
12702 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12703 }
12704
12705 /* Get and add the displacement. */
12706 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12707 {
12708 case 0:
12709 break;
12710 case 1:
12711 {
12712 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12713 u32EffAddr += i8Disp;
12714 break;
12715 }
12716 case 2:
12717 {
12718 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12719 u32EffAddr += u32Disp;
12720 break;
12721 }
12722 default:
12723 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12724 }
12725
12726 }
12727 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12728 *pGCPtrEff = u32EffAddr;
12729 else
12730 {
12731 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12732 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12733 }
12734 }
12735 }
12736 else
12737 {
12738 uint64_t u64EffAddr;
12739
12740 /* Handle the rip+disp32 form with no registers first. */
12741 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12742 {
12743 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12744 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12745 }
12746 else
12747 {
12748 /* Get the register (or SIB) value. */
12749 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12750 {
12751 case 0: u64EffAddr = pCtx->rax; break;
12752 case 1: u64EffAddr = pCtx->rcx; break;
12753 case 2: u64EffAddr = pCtx->rdx; break;
12754 case 3: u64EffAddr = pCtx->rbx; break;
12755 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12756 case 6: u64EffAddr = pCtx->rsi; break;
12757 case 7: u64EffAddr = pCtx->rdi; break;
12758 case 8: u64EffAddr = pCtx->r8; break;
12759 case 9: u64EffAddr = pCtx->r9; break;
12760 case 10: u64EffAddr = pCtx->r10; break;
12761 case 11: u64EffAddr = pCtx->r11; break;
12762 case 13: u64EffAddr = pCtx->r13; break;
12763 case 14: u64EffAddr = pCtx->r14; break;
12764 case 15: u64EffAddr = pCtx->r15; break;
12765 /* SIB */
12766 case 4:
12767 case 12:
12768 {
12769 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12770
12771 /* Get the index and scale it. */
12772 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12773 {
12774 case 0: u64EffAddr = pCtx->rax; break;
12775 case 1: u64EffAddr = pCtx->rcx; break;
12776 case 2: u64EffAddr = pCtx->rdx; break;
12777 case 3: u64EffAddr = pCtx->rbx; break;
12778 case 4: u64EffAddr = 0; /*none */ break;
12779 case 5: u64EffAddr = pCtx->rbp; break;
12780 case 6: u64EffAddr = pCtx->rsi; break;
12781 case 7: u64EffAddr = pCtx->rdi; break;
12782 case 8: u64EffAddr = pCtx->r8; break;
12783 case 9: u64EffAddr = pCtx->r9; break;
12784 case 10: u64EffAddr = pCtx->r10; break;
12785 case 11: u64EffAddr = pCtx->r11; break;
12786 case 12: u64EffAddr = pCtx->r12; break;
12787 case 13: u64EffAddr = pCtx->r13; break;
12788 case 14: u64EffAddr = pCtx->r14; break;
12789 case 15: u64EffAddr = pCtx->r15; break;
12790 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12791 }
12792 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12793
12794 /* add base */
12795 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12796 {
12797 case 0: u64EffAddr += pCtx->rax; break;
12798 case 1: u64EffAddr += pCtx->rcx; break;
12799 case 2: u64EffAddr += pCtx->rdx; break;
12800 case 3: u64EffAddr += pCtx->rbx; break;
12801 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12802 case 6: u64EffAddr += pCtx->rsi; break;
12803 case 7: u64EffAddr += pCtx->rdi; break;
12804 case 8: u64EffAddr += pCtx->r8; break;
12805 case 9: u64EffAddr += pCtx->r9; break;
12806 case 10: u64EffAddr += pCtx->r10; break;
12807 case 11: u64EffAddr += pCtx->r11; break;
12808 case 12: u64EffAddr += pCtx->r12; break;
12809 case 14: u64EffAddr += pCtx->r14; break;
12810 case 15: u64EffAddr += pCtx->r15; break;
12811 /* complicated encodings */
12812 case 5:
12813 case 13:
12814 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12815 {
12816 if (!pVCpu->iem.s.uRexB)
12817 {
12818 u64EffAddr += pCtx->rbp;
12819 SET_SS_DEF();
12820 }
12821 else
12822 u64EffAddr += pCtx->r13;
12823 }
12824 else
12825 {
12826 uint32_t u32Disp;
12827 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12828 u64EffAddr += (int32_t)u32Disp;
12829 }
12830 break;
12831 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12832 }
12833 break;
12834 }
12835 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12836 }
12837
12838 /* Get and add the displacement. */
12839 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12840 {
12841 case 0:
12842 break;
12843 case 1:
12844 {
12845 int8_t i8Disp;
12846 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12847 u64EffAddr += i8Disp;
12848 break;
12849 }
12850 case 2:
12851 {
12852 uint32_t u32Disp;
12853 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12854 u64EffAddr += (int32_t)u32Disp;
12855 break;
12856 }
12857 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12858 }
12859
12860 }
12861
12862 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12863 *pGCPtrEff = u64EffAddr;
12864 else
12865 {
12866 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12867 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12868 }
12869 }
12870
12871 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12872 return VINF_SUCCESS;
12873}
12874
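/* Illustrative sketch: instruction bodies normally reach the helpers in this
 * group through the IEM_MC_CALC_RM_EFF_ADDR wrapper (defined with the other
 * IEM_MC macros in this file) rather than calling them directly, along these
 * lines:
 *     IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 */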
12875
12876/**
12877 * Calculates the effective address of a ModR/M memory operand.
12878 *
12879 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12880 *
12881 * @return Strict VBox status code.
12882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12883 * @param bRm The ModRM byte.
12884 * @param cbImm The size of any immediate following the
12885 * effective address opcode bytes. Important for
12886 * RIP relative addressing.
12887 * @param pGCPtrEff Where to return the effective address.
12888 * @param offRsp RSP displacement.
12889 */
12890IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
12891{
12892 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
12893 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12894# define SET_SS_DEF() \
12895 do \
12896 { \
12897 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12898 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12899 } while (0)
12900
12901 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12902 {
12903/** @todo Check the effective address size crap! */
12904 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12905 {
12906 uint16_t u16EffAddr;
12907
12908 /* Handle the disp16 form with no registers first. */
12909 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12910 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12911 else
12912 {
12913 /* Get the displacement. */
12914 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12915 {
12916 case 0: u16EffAddr = 0; break;
12917 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12918 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12919 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12920 }
12921
12922 /* Add the base and index registers to the disp. */
12923 switch (bRm & X86_MODRM_RM_MASK)
12924 {
12925 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12926 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12927 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12928 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12929 case 4: u16EffAddr += pCtx->si; break;
12930 case 5: u16EffAddr += pCtx->di; break;
12931 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12932 case 7: u16EffAddr += pCtx->bx; break;
12933 }
12934 }
12935
12936 *pGCPtrEff = u16EffAddr;
12937 }
12938 else
12939 {
12940 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12941 uint32_t u32EffAddr;
12942
12943 /* Handle the disp32 form with no registers first. */
12944 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12945 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12946 else
12947 {
12948 /* Get the register (or SIB) value. */
12949 switch ((bRm & X86_MODRM_RM_MASK))
12950 {
12951 case 0: u32EffAddr = pCtx->eax; break;
12952 case 1: u32EffAddr = pCtx->ecx; break;
12953 case 2: u32EffAddr = pCtx->edx; break;
12954 case 3: u32EffAddr = pCtx->ebx; break;
12955 case 4: /* SIB */
12956 {
12957 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12958
12959 /* Get the index and scale it. */
12960 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12961 {
12962 case 0: u32EffAddr = pCtx->eax; break;
12963 case 1: u32EffAddr = pCtx->ecx; break;
12964 case 2: u32EffAddr = pCtx->edx; break;
12965 case 3: u32EffAddr = pCtx->ebx; break;
12966 case 4: u32EffAddr = 0; /*none */ break;
12967 case 5: u32EffAddr = pCtx->ebp; break;
12968 case 6: u32EffAddr = pCtx->esi; break;
12969 case 7: u32EffAddr = pCtx->edi; break;
12970 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12971 }
12972 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12973
12974 /* add base */
12975 switch (bSib & X86_SIB_BASE_MASK)
12976 {
12977 case 0: u32EffAddr += pCtx->eax; break;
12978 case 1: u32EffAddr += pCtx->ecx; break;
12979 case 2: u32EffAddr += pCtx->edx; break;
12980 case 3: u32EffAddr += pCtx->ebx; break;
12981 case 4:
12982 u32EffAddr += pCtx->esp + offRsp;
12983 SET_SS_DEF();
12984 break;
12985 case 5:
12986 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12987 {
12988 u32EffAddr += pCtx->ebp;
12989 SET_SS_DEF();
12990 }
12991 else
12992 {
12993 uint32_t u32Disp;
12994 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12995 u32EffAddr += u32Disp;
12996 }
12997 break;
12998 case 6: u32EffAddr += pCtx->esi; break;
12999 case 7: u32EffAddr += pCtx->edi; break;
13000 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13001 }
13002 break;
13003 }
13004 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13005 case 6: u32EffAddr = pCtx->esi; break;
13006 case 7: u32EffAddr = pCtx->edi; break;
13007 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13008 }
13009
13010 /* Get and add the displacement. */
13011 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13012 {
13013 case 0:
13014 break;
13015 case 1:
13016 {
13017 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13018 u32EffAddr += i8Disp;
13019 break;
13020 }
13021 case 2:
13022 {
13023 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13024 u32EffAddr += u32Disp;
13025 break;
13026 }
13027 default:
13028 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13029 }
13030
13031 }
13032 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13033 *pGCPtrEff = u32EffAddr;
13034 else
13035 {
13036 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13037 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13038 }
13039 }
13040 }
13041 else
13042 {
13043 uint64_t u64EffAddr;
13044
13045 /* Handle the rip+disp32 form with no registers first. */
13046 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13047 {
13048 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13049 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13050 }
13051 else
13052 {
13053 /* Get the register (or SIB) value. */
13054 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13055 {
13056 case 0: u64EffAddr = pCtx->rax; break;
13057 case 1: u64EffAddr = pCtx->rcx; break;
13058 case 2: u64EffAddr = pCtx->rdx; break;
13059 case 3: u64EffAddr = pCtx->rbx; break;
13060 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13061 case 6: u64EffAddr = pCtx->rsi; break;
13062 case 7: u64EffAddr = pCtx->rdi; break;
13063 case 8: u64EffAddr = pCtx->r8; break;
13064 case 9: u64EffAddr = pCtx->r9; break;
13065 case 10: u64EffAddr = pCtx->r10; break;
13066 case 11: u64EffAddr = pCtx->r11; break;
13067 case 13: u64EffAddr = pCtx->r13; break;
13068 case 14: u64EffAddr = pCtx->r14; break;
13069 case 15: u64EffAddr = pCtx->r15; break;
13070 /* SIB */
13071 case 4:
13072 case 12:
13073 {
13074 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13075
13076 /* Get the index and scale it. */
13077 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13078 {
13079 case 0: u64EffAddr = pCtx->rax; break;
13080 case 1: u64EffAddr = pCtx->rcx; break;
13081 case 2: u64EffAddr = pCtx->rdx; break;
13082 case 3: u64EffAddr = pCtx->rbx; break;
13083 case 4: u64EffAddr = 0; /*none */ break;
13084 case 5: u64EffAddr = pCtx->rbp; break;
13085 case 6: u64EffAddr = pCtx->rsi; break;
13086 case 7: u64EffAddr = pCtx->rdi; break;
13087 case 8: u64EffAddr = pCtx->r8; break;
13088 case 9: u64EffAddr = pCtx->r9; break;
13089 case 10: u64EffAddr = pCtx->r10; break;
13090 case 11: u64EffAddr = pCtx->r11; break;
13091 case 12: u64EffAddr = pCtx->r12; break;
13092 case 13: u64EffAddr = pCtx->r13; break;
13093 case 14: u64EffAddr = pCtx->r14; break;
13094 case 15: u64EffAddr = pCtx->r15; break;
13095 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13096 }
13097 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13098
13099 /* add base */
13100 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13101 {
13102 case 0: u64EffAddr += pCtx->rax; break;
13103 case 1: u64EffAddr += pCtx->rcx; break;
13104 case 2: u64EffAddr += pCtx->rdx; break;
13105 case 3: u64EffAddr += pCtx->rbx; break;
13106 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
13107 case 6: u64EffAddr += pCtx->rsi; break;
13108 case 7: u64EffAddr += pCtx->rdi; break;
13109 case 8: u64EffAddr += pCtx->r8; break;
13110 case 9: u64EffAddr += pCtx->r9; break;
13111 case 10: u64EffAddr += pCtx->r10; break;
13112 case 11: u64EffAddr += pCtx->r11; break;
13113 case 12: u64EffAddr += pCtx->r12; break;
13114 case 14: u64EffAddr += pCtx->r14; break;
13115 case 15: u64EffAddr += pCtx->r15; break;
13116 /* complicated encodings */
13117 case 5:
13118 case 13:
13119 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13120 {
13121 if (!pVCpu->iem.s.uRexB)
13122 {
13123 u64EffAddr += pCtx->rbp;
13124 SET_SS_DEF();
13125 }
13126 else
13127 u64EffAddr += pCtx->r13;
13128 }
13129 else
13130 {
13131 uint32_t u32Disp;
13132 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13133 u64EffAddr += (int32_t)u32Disp;
13134 }
13135 break;
13136 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13137 }
13138 break;
13139 }
13140 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13141 }
13142
13143 /* Get and add the displacement. */
13144 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13145 {
13146 case 0:
13147 break;
13148 case 1:
13149 {
13150 int8_t i8Disp;
13151 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13152 u64EffAddr += i8Disp;
13153 break;
13154 }
13155 case 2:
13156 {
13157 uint32_t u32Disp;
13158 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13159 u64EffAddr += (int32_t)u32Disp;
13160 break;
13161 }
13162 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13163 }
13164
13165 }
13166
13167 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13168 *pGCPtrEff = u64EffAddr;
13169 else
13170 {
13171 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13172 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13173 }
13174 }
13175
13176 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13177 return VINF_SUCCESS;
13178}
13179
13180
13181#ifdef IEM_WITH_SETJMP
13182/**
13183 * Calculates the effective address of a ModR/M memory operand.
13184 *
13185 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13186 *
13187 * May longjmp on internal error.
13188 *
13189 * @return The effective address.
13190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13191 * @param bRm The ModRM byte.
13192 * @param cbImm The size of any immediate following the
13193 * effective address opcode bytes. Important for
13194 * RIP relative addressing.
13195 */
13196IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13197{
13198 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13199 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13200# define SET_SS_DEF() \
13201 do \
13202 { \
13203 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13204 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13205 } while (0)
13206
13207 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13208 {
13209/** @todo Check the effective address size crap! */
13210 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13211 {
13212 uint16_t u16EffAddr;
13213
13214 /* Handle the disp16 form with no registers first. */
13215 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13216 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13217 else
13218 {
13219 /* Get the displacement. */
13220 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13221 {
13222 case 0: u16EffAddr = 0; break;
13223 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13224 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13225 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13226 }
13227
13228 /* Add the base and index registers to the disp. */
13229 switch (bRm & X86_MODRM_RM_MASK)
13230 {
13231 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13232 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13233 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13234 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13235 case 4: u16EffAddr += pCtx->si; break;
13236 case 5: u16EffAddr += pCtx->di; break;
13237 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13238 case 7: u16EffAddr += pCtx->bx; break;
13239 }
13240 }
13241
13242 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13243 return u16EffAddr;
13244 }
13245
13246 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13247 uint32_t u32EffAddr;
13248
13249 /* Handle the disp32 form with no registers first. */
13250 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13251 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13252 else
13253 {
13254 /* Get the register (or SIB) value. */
13255 switch ((bRm & X86_MODRM_RM_MASK))
13256 {
13257 case 0: u32EffAddr = pCtx->eax; break;
13258 case 1: u32EffAddr = pCtx->ecx; break;
13259 case 2: u32EffAddr = pCtx->edx; break;
13260 case 3: u32EffAddr = pCtx->ebx; break;
13261 case 4: /* SIB */
13262 {
13263 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13264
13265 /* Get the index and scale it. */
13266 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13267 {
13268 case 0: u32EffAddr = pCtx->eax; break;
13269 case 1: u32EffAddr = pCtx->ecx; break;
13270 case 2: u32EffAddr = pCtx->edx; break;
13271 case 3: u32EffAddr = pCtx->ebx; break;
13272 case 4: u32EffAddr = 0; /*none */ break;
13273 case 5: u32EffAddr = pCtx->ebp; break;
13274 case 6: u32EffAddr = pCtx->esi; break;
13275 case 7: u32EffAddr = pCtx->edi; break;
13276 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13277 }
13278 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13279
13280 /* add base */
13281 switch (bSib & X86_SIB_BASE_MASK)
13282 {
13283 case 0: u32EffAddr += pCtx->eax; break;
13284 case 1: u32EffAddr += pCtx->ecx; break;
13285 case 2: u32EffAddr += pCtx->edx; break;
13286 case 3: u32EffAddr += pCtx->ebx; break;
13287 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13288 case 5:
13289 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13290 {
13291 u32EffAddr += pCtx->ebp;
13292 SET_SS_DEF();
13293 }
13294 else
13295 {
13296 uint32_t u32Disp;
13297 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13298 u32EffAddr += u32Disp;
13299 }
13300 break;
13301 case 6: u32EffAddr += pCtx->esi; break;
13302 case 7: u32EffAddr += pCtx->edi; break;
13303 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13304 }
13305 break;
13306 }
13307 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13308 case 6: u32EffAddr = pCtx->esi; break;
13309 case 7: u32EffAddr = pCtx->edi; break;
13310 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13311 }
13312
13313 /* Get and add the displacement. */
13314 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13315 {
13316 case 0:
13317 break;
13318 case 1:
13319 {
13320 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13321 u32EffAddr += i8Disp;
13322 break;
13323 }
13324 case 2:
13325 {
13326 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13327 u32EffAddr += u32Disp;
13328 break;
13329 }
13330 default:
13331 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13332 }
13333 }
13334
13335 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13336 {
13337 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13338 return u32EffAddr;
13339 }
13340 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13341 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13342 return u32EffAddr & UINT16_MAX;
13343 }
13344
13345 uint64_t u64EffAddr;
13346
13347 /* Handle the rip+disp32 form with no registers first. */
13348 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13349 {
13350 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13351 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13352 }
13353 else
13354 {
13355 /* Get the register (or SIB) value. */
13356 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13357 {
13358 case 0: u64EffAddr = pCtx->rax; break;
13359 case 1: u64EffAddr = pCtx->rcx; break;
13360 case 2: u64EffAddr = pCtx->rdx; break;
13361 case 3: u64EffAddr = pCtx->rbx; break;
13362 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13363 case 6: u64EffAddr = pCtx->rsi; break;
13364 case 7: u64EffAddr = pCtx->rdi; break;
13365 case 8: u64EffAddr = pCtx->r8; break;
13366 case 9: u64EffAddr = pCtx->r9; break;
13367 case 10: u64EffAddr = pCtx->r10; break;
13368 case 11: u64EffAddr = pCtx->r11; break;
13369 case 13: u64EffAddr = pCtx->r13; break;
13370 case 14: u64EffAddr = pCtx->r14; break;
13371 case 15: u64EffAddr = pCtx->r15; break;
13372 /* SIB */
13373 case 4:
13374 case 12:
13375 {
13376 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13377
13378 /* Get the index and scale it. */
13379 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13380 {
13381 case 0: u64EffAddr = pCtx->rax; break;
13382 case 1: u64EffAddr = pCtx->rcx; break;
13383 case 2: u64EffAddr = pCtx->rdx; break;
13384 case 3: u64EffAddr = pCtx->rbx; break;
13385 case 4: u64EffAddr = 0; /*none */ break;
13386 case 5: u64EffAddr = pCtx->rbp; break;
13387 case 6: u64EffAddr = pCtx->rsi; break;
13388 case 7: u64EffAddr = pCtx->rdi; break;
13389 case 8: u64EffAddr = pCtx->r8; break;
13390 case 9: u64EffAddr = pCtx->r9; break;
13391 case 10: u64EffAddr = pCtx->r10; break;
13392 case 11: u64EffAddr = pCtx->r11; break;
13393 case 12: u64EffAddr = pCtx->r12; break;
13394 case 13: u64EffAddr = pCtx->r13; break;
13395 case 14: u64EffAddr = pCtx->r14; break;
13396 case 15: u64EffAddr = pCtx->r15; break;
13397 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13398 }
13399 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13400
13401 /* add base */
13402 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13403 {
13404 case 0: u64EffAddr += pCtx->rax; break;
13405 case 1: u64EffAddr += pCtx->rcx; break;
13406 case 2: u64EffAddr += pCtx->rdx; break;
13407 case 3: u64EffAddr += pCtx->rbx; break;
13408 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13409 case 6: u64EffAddr += pCtx->rsi; break;
13410 case 7: u64EffAddr += pCtx->rdi; break;
13411 case 8: u64EffAddr += pCtx->r8; break;
13412 case 9: u64EffAddr += pCtx->r9; break;
13413 case 10: u64EffAddr += pCtx->r10; break;
13414 case 11: u64EffAddr += pCtx->r11; break;
13415 case 12: u64EffAddr += pCtx->r12; break;
13416 case 14: u64EffAddr += pCtx->r14; break;
13417 case 15: u64EffAddr += pCtx->r15; break;
13418 /* complicated encodings */
13419 case 5:
13420 case 13:
13421 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13422 {
13423 if (!pVCpu->iem.s.uRexB)
13424 {
13425 u64EffAddr += pCtx->rbp;
13426 SET_SS_DEF();
13427 }
13428 else
13429 u64EffAddr += pCtx->r13;
13430 }
13431 else
13432 {
13433 uint32_t u32Disp;
13434 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13435 u64EffAddr += (int32_t)u32Disp;
13436 }
13437 break;
13438 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13439 }
13440 break;
13441 }
13442 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13443 }
13444
13445 /* Get and add the displacement. */
13446 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13447 {
13448 case 0:
13449 break;
13450 case 1:
13451 {
13452 int8_t i8Disp;
13453 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13454 u64EffAddr += i8Disp;
13455 break;
13456 }
13457 case 2:
13458 {
13459 uint32_t u32Disp;
13460 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13461 u64EffAddr += (int32_t)u32Disp;
13462 break;
13463 }
13464 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13465 }
13466
13467 }
13468
13469 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13470 {
13471 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13472 return u64EffAddr;
13473 }
13474 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13475 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13476 return u64EffAddr & UINT32_MAX;
13477}
13478#endif /* IEM_WITH_SETJMP */
13479
13480
13481/** @} */
13482
13483
13484
13485/*
13486 * Include the instructions
13487 */
13488#include "IEMAllInstructions.cpp.h"
13489
13490
13491
13492
13493#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13494
13495/**
13496 * Sets up execution verification mode.
13497 */
13498IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13499{
13501 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13502
13503 /*
13504 * Always note down the address of the current instruction.
13505 */
13506 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13507 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13508
13509 /*
13510 * Enable verification and/or logging.
13511 */
13512 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
13513 if ( fNewNoRem
13514 && ( 0
13515#if 0 /* auto enable on first paged protected mode interrupt */
13516 || ( pOrgCtx->eflags.Bits.u1IF
13517 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13518 && TRPMHasTrap(pVCpu)
13519 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13520#endif
13521#if 0
13522 || ( pOrgCtx->cs.Sel == 0x10
13523 && ( pOrgCtx->rip == 0x90119e3e
13524 || pOrgCtx->rip == 0x901d9810) )
13525#endif
13526#if 0 /* Auto enable DSL - FPU stuff. */
13527 || ( pOrgCtx->cs.Sel == 0x10
13528 && (// pOrgCtx->rip == 0xc02ec07f
13529 //|| pOrgCtx->rip == 0xc02ec082
13530 //|| pOrgCtx->rip == 0xc02ec0c9
13531 0
13532 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13533#endif
13534#if 0 /* Auto enable DSL - fstp st0 stuff. */
13535 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13536#endif
13537#if 0
13538 || pOrgCtx->rip == 0x9022bb3a
13539#endif
13540#if 0
13541 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13542#endif
13543#if 0
13544 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13545 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13546#endif
13547#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
13548 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13549 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13550 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13551#endif
13552#if 0 /* NT4SP1 - xadd early boot. */
13553 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13554#endif
13555#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13556 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13557#endif
13558#if 0 /* NT4SP1 - cmpxchg (AMD). */
13559 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13560#endif
13561#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13562 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13563#endif
13564#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13565 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13566
13567#endif
13568#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13569 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13570
13571#endif
13572#if 0 /* NT4SP1 - frstor [ecx] */
13573 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13574#endif
13575#if 0 /* xxxxxx - All long mode code. */
13576 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13577#endif
13578#if 0 /* rep movsq linux 3.7 64-bit boot. */
13579 || (pOrgCtx->rip == 0x0000000000100241)
13580#endif
13581#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13582 || (pOrgCtx->rip == 0x000000000215e240)
13583#endif
13584#if 0 /* DOS's size-overridden iret to v8086. */
13585 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13586#endif
13587 )
13588 )
13589 {
13590 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13591 RTLogFlags(NULL, "enabled");
13592 fNewNoRem = false;
13593 }
13594 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13595 {
13596 pVCpu->iem.s.fNoRem = fNewNoRem;
13597 if (!fNewNoRem)
13598 {
13599 LogAlways(("Enabling verification mode!\n"));
13600 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13601 }
13602 else
13603 LogAlways(("Disabling verification mode!\n"));
13604 }
13605
13606 /*
13607 * Switch state.
13608 */
13609 if (IEM_VERIFICATION_ENABLED(pVCpu))
13610 {
13611 static CPUMCTX s_DebugCtx; /* Ugly! */
13612
13613 s_DebugCtx = *pOrgCtx;
13614 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13615 }
13616
13617 /*
13618 * See if there is an interrupt pending in TRPM and inject it if we can.
13619 */
13620 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13621 if ( pOrgCtx->eflags.Bits.u1IF
13622 && TRPMHasTrap(pVCpu)
13623 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13624 {
13625 uint8_t u8TrapNo;
13626 TRPMEVENT enmType;
13627 RTGCUINT uErrCode;
13628 RTGCPTR uCr2;
13629 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13630 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13631 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13632 TRPMResetTrap(pVCpu);
13633 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13634 }
13635
13636 /*
13637 * Reset the counters.
13638 */
13639 pVCpu->iem.s.cIOReads = 0;
13640 pVCpu->iem.s.cIOWrites = 0;
13641 pVCpu->iem.s.fIgnoreRaxRdx = false;
13642 pVCpu->iem.s.fOverlappingMovs = false;
13643 pVCpu->iem.s.fProblematicMemory = false;
13644 pVCpu->iem.s.fUndefinedEFlags = 0;
13645
13646 if (IEM_VERIFICATION_ENABLED(pVCpu))
13647 {
13648 /*
13649 * Free all verification records.
13650 */
13651 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13652 pVCpu->iem.s.pIemEvtRecHead = NULL;
13653 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
13654 do
13655 {
13656 while (pEvtRec)
13657 {
13658 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13659 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13660 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13661 pEvtRec = pNext;
13662 }
13663 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13664 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13665 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13666 } while (pEvtRec);
13667 }
13668}
13669
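/*
 * Note: iemExecVerificationModeSetup() above and iemExecVerificationModeCheck()
 * further down work as a pair.  IEMExecOne() calls the setup function first,
 * which switches IEM to a private copy of the guest context (s_DebugCtx) and
 * injects any pending TRPM event; after the instruction has been interpreted,
 * the check function re-executes it in HM or REM and diffs the two contexts as
 * well as the recorded I/O and RAM event records.
 */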
13670
13671/**
13672 * Allocate an event record.
13673 * @returns Pointer to a record, or NULL if verification is disabled or no record could be allocated.
13674 */
13675IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
13676{
13677 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13678 return NULL;
13679
13680 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
13681 if (pEvtRec)
13682 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
13683 else
13684 {
13685 if (!pVCpu->iem.s.ppIemEvtRecNext)
13686 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
13687
13688 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
13689 if (!pEvtRec)
13690 return NULL;
13691 }
13692 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
13693 pEvtRec->pNext = NULL;
13694 return pEvtRec;
13695}
13696
13697
13698/**
13699 * IOMMMIORead notification.
13700 */
13701VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
13702{
13703 PVMCPU pVCpu = VMMGetCpu(pVM);
13704 if (!pVCpu)
13705 return;
13706 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13707 if (!pEvtRec)
13708 return;
13709 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
13710 pEvtRec->u.RamRead.GCPhys = GCPhys;
13711 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
13712 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13713 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13714}
13715
13716
13717/**
13718 * IOMMMIOWrite notification.
13719 */
13720VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
13721{
13722 PVMCPU pVCpu = VMMGetCpu(pVM);
13723 if (!pVCpu)
13724 return;
13725 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13726 if (!pEvtRec)
13727 return;
13728 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
13729 pEvtRec->u.RamWrite.GCPhys = GCPhys;
13730 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
13731 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
13732 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
13733 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
13734 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
13735 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13736 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13737}
13738
13739
13740/**
13741 * IOMIOPortRead notification.
13742 */
13743VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
13744{
13745 PVMCPU pVCpu = VMMGetCpu(pVM);
13746 if (!pVCpu)
13747 return;
13748 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13749 if (!pEvtRec)
13750 return;
13751 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13752 pEvtRec->u.IOPortRead.Port = Port;
13753 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13754 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13755 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13756}
13757
13758/**
13759 * IOMIOPortWrite notification.
13760 */
13761VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13762{
13763 PVMCPU pVCpu = VMMGetCpu(pVM);
13764 if (!pVCpu)
13765 return;
13766 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13767 if (!pEvtRec)
13768 return;
13769 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13770 pEvtRec->u.IOPortWrite.Port = Port;
13771 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13772 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13773 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13774 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13775}
13776
13777
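/**
 * IOMIOPortReadString notification.
 */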
13778VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
13779{
13780 PVMCPU pVCpu = VMMGetCpu(pVM);
13781 if (!pVCpu)
13782 return;
13783 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13784 if (!pEvtRec)
13785 return;
13786 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
13787 pEvtRec->u.IOPortStrRead.Port = Port;
13788 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
13789 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
13790 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13791 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13792}
13793
13794
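/**
 * IOMIOPortWriteString notification.
 */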
13795VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
13796{
13797 PVMCPU pVCpu = VMMGetCpu(pVM);
13798 if (!pVCpu)
13799 return;
13800 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13801 if (!pEvtRec)
13802 return;
13803 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
13804 pEvtRec->u.IOPortStrWrite.Port = Port;
13805 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
13806 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
13807 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13808 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13809}
13810
13811
13812/**
13813 * Fakes and records an I/O port read.
13814 *
13815 * @returns VINF_SUCCESS.
13816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13817 * @param Port The I/O port.
13818 * @param pu32Value Where to store the fake value.
13819 * @param cbValue The size of the access.
13820 */
13821IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13822{
13823 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13824 if (pEvtRec)
13825 {
13826 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13827 pEvtRec->u.IOPortRead.Port = Port;
13828 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13829 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13830 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13831 }
13832 pVCpu->iem.s.cIOReads++;
13833 *pu32Value = 0xcccccccc;
13834 return VINF_SUCCESS;
13835}
13836
13837
13838/**
13839 * Fakes and records an I/O port write.
13840 *
13841 * @returns VINF_SUCCESS.
13842 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13843 * @param Port The I/O port.
13844 * @param u32Value The value being written.
13845 * @param cbValue The size of the access.
13846 */
13847IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13848{
13849 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13850 if (pEvtRec)
13851 {
13852 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13853 pEvtRec->u.IOPortWrite.Port = Port;
13854 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13855 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13856 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13857 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13858 }
13859 pVCpu->iem.s.cIOWrites++;
13860 return VINF_SUCCESS;
13861}
13862
13863
13864/**
13865 * Used to add extra details about a stub case.
13866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13867 */
13868IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
13869{
13870 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13871 PVM pVM = pVCpu->CTX_SUFF(pVM);
13873 char szRegs[4096];
13874 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
13875 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
13876 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
13877 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
13878 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
13879 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
13880 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
13881 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
13882 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
13883 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
13884 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
13885 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
13886 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
13887 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
13888 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
13889 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
13890 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
13891 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
13892 " efer=%016VR{efer}\n"
13893 " pat=%016VR{pat}\n"
13894 " sf_mask=%016VR{sf_mask}\n"
13895 "krnl_gs_base=%016VR{krnl_gs_base}\n"
13896 " lstar=%016VR{lstar}\n"
13897 " star=%016VR{star} cstar=%016VR{cstar}\n"
13898 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
13899 );
13900
13901 char szInstr1[256];
13902 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
13903 DBGF_DISAS_FLAGS_DEFAULT_MODE,
13904 szInstr1, sizeof(szInstr1), NULL);
13905 char szInstr2[256];
13906 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
13907 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13908 szInstr2, sizeof(szInstr2), NULL);
13909
13910 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
13911}
13912
13913
13914/**
13915 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
13916 * dump to the assertion info.
13917 *
13918 * @param pEvtRec The record to dump.
13919 */
13920IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
13921{
13922 switch (pEvtRec->enmEvent)
13923 {
13924 case IEMVERIFYEVENT_IOPORT_READ:
13925 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
13926 pEvtRec->u.IOPortRead.Port,
13927 pEvtRec->u.IOPortRead.cbValue);
13928 break;
13929 case IEMVERIFYEVENT_IOPORT_WRITE:
13930 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
13931 pEvtRec->u.IOPortWrite.Port,
13932 pEvtRec->u.IOPortWrite.cbValue,
13933 pEvtRec->u.IOPortWrite.u32Value);
13934 break;
13935 case IEMVERIFYEVENT_IOPORT_STR_READ:
13936 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
13937 pEvtRec->u.IOPortStrRead.Port,
13938 pEvtRec->u.IOPortStrRead.cbValue,
13939 pEvtRec->u.IOPortStrRead.cTransfers);
13940 break;
13941 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13942 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
13943 pEvtRec->u.IOPortStrWrite.Port,
13944 pEvtRec->u.IOPortStrWrite.cbValue,
13945 pEvtRec->u.IOPortStrWrite.cTransfers);
13946 break;
13947 case IEMVERIFYEVENT_RAM_READ:
13948 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
13949 pEvtRec->u.RamRead.GCPhys,
13950 pEvtRec->u.RamRead.cb);
13951 break;
13952 case IEMVERIFYEVENT_RAM_WRITE:
13953 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
13954 pEvtRec->u.RamWrite.GCPhys,
13955 pEvtRec->u.RamWrite.cb,
13956 (int)pEvtRec->u.RamWrite.cb,
13957 pEvtRec->u.RamWrite.ab);
13958 break;
13959 default:
13960 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
13961 break;
13962 }
13963}
13964
13965
13966/**
13967 * Raises an assertion on the specified records, showing the given message with
13968 * record dumps attached.
13969 *
13970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13971 * @param pEvtRec1 The first record.
13972 * @param pEvtRec2 The second record.
13973 * @param pszMsg The message explaining why we're asserting.
13974 */
13975IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
13976{
13977 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13978 iemVerifyAssertAddRecordDump(pEvtRec1);
13979 iemVerifyAssertAddRecordDump(pEvtRec2);
13980 iemVerifyAssertMsg2(pVCpu);
13981 RTAssertPanic();
13982}
13983
13984
13985/**
13986 * Raises an assertion on the specified record, showing the given message with
13987 * a record dump attached.
13988 *
13989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13990 * @param pEvtRec The record to dump.
13991 * @param pszMsg The message explaining why we're asserting.
13992 */
13993IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
13994{
13995 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13996 iemVerifyAssertAddRecordDump(pEvtRec);
13997 iemVerifyAssertMsg2(pVCpu);
13998 RTAssertPanic();
13999}
14000
14001
14002/**
14003 * Verifies a write record.
14004 *
14005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14006 * @param pEvtRec The write record.
14007 * @param fRem Set if REM did the other execution.  If clear,
14008 * it was HM.
14009 */
14010IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
14011{
14012 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
14013 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
14014 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
14015 if ( RT_FAILURE(rc)
14016 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
14017 {
14018 /* fend off ins */
14019 if ( !pVCpu->iem.s.cIOReads
14020 || pEvtRec->u.RamWrite.ab[0] != 0xcc
14021 || ( pEvtRec->u.RamWrite.cb != 1
14022 && pEvtRec->u.RamWrite.cb != 2
14023 && pEvtRec->u.RamWrite.cb != 4) )
14024 {
14025 /* fend off ROMs and MMIO */
14026 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
14027 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
14028 {
14029 /* fend off fxsave */
14030 if (pEvtRec->u.RamWrite.cb != 512)
14031 {
14032 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
14033 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14034 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
14035 RTAssertMsg2Add("%s: %.*Rhxs\n"
14036 "iem: %.*Rhxs\n",
14037 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
14038 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
14039 iemVerifyAssertAddRecordDump(pEvtRec);
14040 iemVerifyAssertMsg2(pVCpu);
14041 RTAssertPanic();
14042 }
14043 }
14044 }
14045 }
14046
14047}
14048
14049/**
14050 * Performs the post-execution verification checks.
14051 */
14052IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
14053{
14054 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14055 return rcStrictIem;
14056
14057 /*
14058 * Switch back the state.
14059 */
14060 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
14061 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
14062 Assert(pOrgCtx != pDebugCtx);
14063 IEM_GET_CTX(pVCpu) = pOrgCtx;
14064
14065 /*
14066 * Execute the instruction in REM.
14067 */
14068 bool fRem = false;
14069 PVM pVM = pVCpu->CTX_SUFF(pVM);
14071 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
14072#ifdef IEM_VERIFICATION_MODE_FULL_HM
14073 if ( HMIsEnabled(pVM)
14074 && pVCpu->iem.s.cIOReads == 0
14075 && pVCpu->iem.s.cIOWrites == 0
14076 && !pVCpu->iem.s.fProblematicMemory)
14077 {
14078 uint64_t uStartRip = pOrgCtx->rip;
14079 unsigned iLoops = 0;
14080 do
14081 {
14082 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
14083 iLoops++;
14084 } while ( rc == VINF_SUCCESS
14085 || ( rc == VINF_EM_DBG_STEPPED
14086 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14087 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
14088 || ( pOrgCtx->rip != pDebugCtx->rip
14089 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
14090 && iLoops < 8) );
14091 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
14092 rc = VINF_SUCCESS;
14093 }
14094#endif
14095 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
14096 || rc == VINF_IOM_R3_IOPORT_READ
14097 || rc == VINF_IOM_R3_IOPORT_WRITE
14098 || rc == VINF_IOM_R3_MMIO_READ
14099 || rc == VINF_IOM_R3_MMIO_READ_WRITE
14100 || rc == VINF_IOM_R3_MMIO_WRITE
14101 || rc == VINF_CPUM_R3_MSR_READ
14102 || rc == VINF_CPUM_R3_MSR_WRITE
14103 || rc == VINF_EM_RESCHEDULE
14104 )
14105 {
14106 EMRemLock(pVM);
14107 rc = REMR3EmulateInstruction(pVM, pVCpu);
14108 AssertRC(rc);
14109 EMRemUnlock(pVM);
14110 fRem = true;
14111 }
14112
14113# if 1 /* Skip unimplemented instructions for now. */
14114 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14115 {
14116 IEM_GET_CTX(pVCpu) = pOrgCtx;
14117 if (rc == VINF_EM_DBG_STEPPED)
14118 return VINF_SUCCESS;
14119 return rc;
14120 }
14121# endif
14122
14123 /*
14124 * Compare the register states.
14125 */
14126 unsigned cDiffs = 0;
14127 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
14128 {
14129 //Log(("REM and IEM ends up with different registers!\n"));
14130 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
14131
14132# define CHECK_FIELD(a_Field) \
14133 do \
14134 { \
14135 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14136 { \
14137 switch (sizeof(pOrgCtx->a_Field)) \
14138 { \
14139 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14140 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14141 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14142 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14143 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14144 } \
14145 cDiffs++; \
14146 } \
14147 } while (0)
14148# define CHECK_XSTATE_FIELD(a_Field) \
14149 do \
14150 { \
14151 if (pOrgXState->a_Field != pDebugXState->a_Field) \
14152 { \
14153 switch (sizeof(pOrgXState->a_Field)) \
14154 { \
14155 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14156 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14157 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14158 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14159 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14160 } \
14161 cDiffs++; \
14162 } \
14163 } while (0)
14164
14165# define CHECK_BIT_FIELD(a_Field) \
14166 do \
14167 { \
14168 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14169 { \
14170 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
14171 cDiffs++; \
14172 } \
14173 } while (0)
14174
14175# define CHECK_SEL(a_Sel) \
14176 do \
14177 { \
14178 CHECK_FIELD(a_Sel.Sel); \
14179 CHECK_FIELD(a_Sel.Attr.u); \
14180 CHECK_FIELD(a_Sel.u64Base); \
14181 CHECK_FIELD(a_Sel.u32Limit); \
14182 CHECK_FIELD(a_Sel.fFlags); \
14183 } while (0)
14184
14185 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
14186 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
14187
14188#if 1 /* The recompiler doesn't update these the intel way. */
14189 if (fRem)
14190 {
14191 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
14192 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
14193 pOrgXState->x87.CS = pDebugXState->x87.CS;
14194 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
14195 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
14196 pOrgXState->x87.DS = pDebugXState->x87.DS;
14197 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
14198 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
14199 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
14200 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
14201 }
14202#endif
14203 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
14204 {
14205 RTAssertMsg2Weak(" the FPU state differs\n");
14206 cDiffs++;
14207 CHECK_XSTATE_FIELD(x87.FCW);
14208 CHECK_XSTATE_FIELD(x87.FSW);
14209 CHECK_XSTATE_FIELD(x87.FTW);
14210 CHECK_XSTATE_FIELD(x87.FOP);
14211 CHECK_XSTATE_FIELD(x87.FPUIP);
14212 CHECK_XSTATE_FIELD(x87.CS);
14213 CHECK_XSTATE_FIELD(x87.Rsrvd1);
14214 CHECK_XSTATE_FIELD(x87.FPUDP);
14215 CHECK_XSTATE_FIELD(x87.DS);
14216 CHECK_XSTATE_FIELD(x87.Rsrvd2);
14217 CHECK_XSTATE_FIELD(x87.MXCSR);
14218 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
14219 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
14220 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
14221 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
14222 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
14223 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
14224 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
14225 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
14226 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
14227 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
14228 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
14229 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
14230 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
14231 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
14232 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
14233 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
14234 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
14235 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
14236 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
14237 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
14238 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
14239 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
14240 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
14241 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
14242 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
14243 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
14244 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
14245 }
14246 CHECK_FIELD(rip);
14247 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
14248 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
14249 {
14250 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
14251 CHECK_BIT_FIELD(rflags.Bits.u1CF);
14252 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
14253 CHECK_BIT_FIELD(rflags.Bits.u1PF);
14254 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
14255 CHECK_BIT_FIELD(rflags.Bits.u1AF);
14256 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
14257 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
14258 CHECK_BIT_FIELD(rflags.Bits.u1SF);
14259 CHECK_BIT_FIELD(rflags.Bits.u1TF);
14260 CHECK_BIT_FIELD(rflags.Bits.u1IF);
14261 CHECK_BIT_FIELD(rflags.Bits.u1DF);
14262 CHECK_BIT_FIELD(rflags.Bits.u1OF);
14263 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
14264 CHECK_BIT_FIELD(rflags.Bits.u1NT);
14265 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
14266 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
14267 CHECK_BIT_FIELD(rflags.Bits.u1RF);
14268 CHECK_BIT_FIELD(rflags.Bits.u1VM);
14269 CHECK_BIT_FIELD(rflags.Bits.u1AC);
14270 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
14271 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
14272 CHECK_BIT_FIELD(rflags.Bits.u1ID);
14273 }
14274
14275 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
14276 CHECK_FIELD(rax);
14277 CHECK_FIELD(rcx);
14278 if (!pVCpu->iem.s.fIgnoreRaxRdx)
14279 CHECK_FIELD(rdx);
14280 CHECK_FIELD(rbx);
14281 CHECK_FIELD(rsp);
14282 CHECK_FIELD(rbp);
14283 CHECK_FIELD(rsi);
14284 CHECK_FIELD(rdi);
14285 CHECK_FIELD(r8);
14286 CHECK_FIELD(r9);
14287 CHECK_FIELD(r10);
14288 CHECK_FIELD(r11);
14289 CHECK_FIELD(r12);
14290 CHECK_FIELD(r13);
14291 CHECK_SEL(cs);
14292 CHECK_SEL(ss);
14293 CHECK_SEL(ds);
14294 CHECK_SEL(es);
14295 CHECK_SEL(fs);
14296 CHECK_SEL(gs);
14297 CHECK_FIELD(cr0);
14298
14299 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
14300 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
14301 /* Kludge #2: CR2 differs slightly on cross page boundary faults, we report the last address of the access
14302 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
14303 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14304 {
14305 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14306 { /* ignore */ }
14307 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14308 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14309 && fRem)
14310 { /* ignore */ }
14311 else
14312 CHECK_FIELD(cr2);
14313 }
14314 CHECK_FIELD(cr3);
14315 CHECK_FIELD(cr4);
14316 CHECK_FIELD(dr[0]);
14317 CHECK_FIELD(dr[1]);
14318 CHECK_FIELD(dr[2]);
14319 CHECK_FIELD(dr[3]);
14320 CHECK_FIELD(dr[6]);
14321 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14322 CHECK_FIELD(dr[7]);
14323 CHECK_FIELD(gdtr.cbGdt);
14324 CHECK_FIELD(gdtr.pGdt);
14325 CHECK_FIELD(idtr.cbIdt);
14326 CHECK_FIELD(idtr.pIdt);
14327 CHECK_SEL(ldtr);
14328 CHECK_SEL(tr);
14329 CHECK_FIELD(SysEnter.cs);
14330 CHECK_FIELD(SysEnter.eip);
14331 CHECK_FIELD(SysEnter.esp);
14332 CHECK_FIELD(msrEFER);
14333 CHECK_FIELD(msrSTAR);
14334 CHECK_FIELD(msrPAT);
14335 CHECK_FIELD(msrLSTAR);
14336 CHECK_FIELD(msrCSTAR);
14337 CHECK_FIELD(msrSFMASK);
14338 CHECK_FIELD(msrKERNELGSBASE);
14339
14340 if (cDiffs != 0)
14341 {
14342 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14343 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14344 RTAssertPanic();
14345 static bool volatile s_fEnterDebugger = true;
14346 if (s_fEnterDebugger)
14347 DBGFSTOP(pVM);
14348
14349# if 1 /* Ignore unimplemented instructions for now. */
14350 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14351 rcStrictIem = VINF_SUCCESS;
14352# endif
14353 }
14354# undef CHECK_FIELD
14355# undef CHECK_BIT_FIELD
14356 }
14357
14358 /*
14359 * If the register state compared fine, check the verification event
14360 * records.
14361 */
14362 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14363 {
14364 /*
14365 * Compare verification event records.
14366 * - I/O port accesses should be a 1:1 match.
14367 */
14368 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14369 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14370 while (pIemRec && pOtherRec)
14371 {
14372 /* Since we might miss RAM writes and reads, ignore reads and verify
14373 that any extra IEM write records match guest memory. */
14374 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14375 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14376 && pIemRec->pNext)
14377 {
14378 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14379 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14380 pIemRec = pIemRec->pNext;
14381 }
14382
14383 /* Do the compare. */
14384 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14385 {
14386 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14387 break;
14388 }
14389 bool fEquals;
14390 switch (pIemRec->enmEvent)
14391 {
14392 case IEMVERIFYEVENT_IOPORT_READ:
14393 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14394 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14395 break;
14396 case IEMVERIFYEVENT_IOPORT_WRITE:
14397 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14398 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14399 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14400 break;
14401 case IEMVERIFYEVENT_IOPORT_STR_READ:
14402 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14403 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14404 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14405 break;
14406 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14407 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14408 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14409 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14410 break;
14411 case IEMVERIFYEVENT_RAM_READ:
14412 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14413 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14414 break;
14415 case IEMVERIFYEVENT_RAM_WRITE:
14416 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14417 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14418 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14419 break;
14420 default:
14421 fEquals = false;
14422 break;
14423 }
14424 if (!fEquals)
14425 {
14426 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14427 break;
14428 }
14429
14430 /* advance */
14431 pIemRec = pIemRec->pNext;
14432 pOtherRec = pOtherRec->pNext;
14433 }
14434
14435 /* Ignore extra writes and reads. */
14436 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14437 {
14438 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14439 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14440 pIemRec = pIemRec->pNext;
14441 }
14442 if (pIemRec != NULL)
14443 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14444 else if (pOtherRec != NULL)
14445 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14446 }
14447 IEM_GET_CTX(pVCpu) = pOrgCtx;
14448
14449 return rcStrictIem;
14450}
14451
14452#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14453
14454/* stubs */
14455IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14456{
14457 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14458 return VERR_INTERNAL_ERROR;
14459}
14460
14461IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14462{
14463 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14464 return VERR_INTERNAL_ERROR;
14465}
14466
14467#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14468
14469
14470#ifdef LOG_ENABLED
14471/**
14472 * Logs the current instruction.
14473 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14474 * @param pCtx The current CPU context.
14475 * @param fSameCtx Set if we have the same context information as the VMM,
14476 * clear if we may have already executed an instruction in
14477 * our debug context. When clear, we assume IEMCPU holds
14478 * valid CPU mode info.
14479 */
14480IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14481{
14482# ifdef IN_RING3
14483 if (LogIs2Enabled())
14484 {
14485 char szInstr[256];
14486 uint32_t cbInstr = 0;
14487 if (fSameCtx)
14488 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14489 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14490 szInstr, sizeof(szInstr), &cbInstr);
14491 else
14492 {
14493 uint32_t fFlags = 0;
14494 switch (pVCpu->iem.s.enmCpuMode)
14495 {
14496 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14497 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14498 case IEMMODE_16BIT:
14499 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14500 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14501 else
14502 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14503 break;
14504 }
14505 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14506 szInstr, sizeof(szInstr), &cbInstr);
14507 }
14508
14509 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14510 Log2(("****\n"
14511 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14512 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14513 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14514 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14515 " %s\n"
14516 ,
14517 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14518 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14519 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14520 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14521 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14522 szInstr));
14523
14524 if (LogIs3Enabled())
14525 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14526 }
14527 else
14528# endif
14529 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14530 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14531 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14532}
14533#endif
14534
14535
14536/**
14537 * Makes status code adjustments (pass up from I/O and access handlers)
14538 * as well as maintaining statistics.
14539 *
14540 * @returns Strict VBox status code to pass up.
14541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14542 * @param rcStrict The status from executing an instruction.
14543 */
14544DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14545{
14546 if (rcStrict != VINF_SUCCESS)
14547 {
14548 if (RT_SUCCESS(rcStrict))
14549 {
14550 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14551 || rcStrict == VINF_IOM_R3_IOPORT_READ
14552 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14553 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14554 || rcStrict == VINF_IOM_R3_MMIO_READ
14555 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14556 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14557 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14558 || rcStrict == VINF_CPUM_R3_MSR_READ
14559 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14560 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14561 || rcStrict == VINF_EM_RAW_TO_R3
14562 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14563 /* raw-mode / virt handlers only: */
14564 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14565 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14566 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14567 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14568 || rcStrict == VINF_SELM_SYNC_GDT
14569 || rcStrict == VINF_CSAM_PENDING_ACTION
14570 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14571 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14572/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
14573 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
14574 if (rcPassUp == VINF_SUCCESS)
14575 pVCpu->iem.s.cRetInfStatuses++;
14576 else if ( rcPassUp < VINF_EM_FIRST
14577 || rcPassUp > VINF_EM_LAST
14578 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14579 {
14580 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14581 pVCpu->iem.s.cRetPassUpStatus++;
14582 rcStrict = rcPassUp;
14583 }
14584 else
14585 {
14586 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14587 pVCpu->iem.s.cRetInfStatuses++;
14588 }
14589 }
14590 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14591 pVCpu->iem.s.cRetAspectNotImplemented++;
14592 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14593 pVCpu->iem.s.cRetInstrNotImplemented++;
14594#ifdef IEM_VERIFICATION_MODE_FULL
14595 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14596 rcStrict = VINF_SUCCESS;
14597#endif
14598 else
14599 pVCpu->iem.s.cRetErrStatuses++;
14600 }
14601 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14602 {
14603 pVCpu->iem.s.cRetPassUpStatus++;
14604 rcStrict = pVCpu->iem.s.rcPassUp;
14605 }
14606
14607 return rcStrict;
14608}
14609
14610
14611/**
14612 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14613 * IEMExecOneWithPrefetchedByPC.
14614 *
14615 * Similar code is found in IEMExecLots.
14616 *
14617 * @return Strict VBox status code.
14618 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14620 * @param fExecuteInhibit If set, execute the instruction following CLI,
14621 * POP SS and MOV SS,GR.
14622 */
14623DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14624{
14625#ifdef IEM_WITH_SETJMP
14626 VBOXSTRICTRC rcStrict;
14627 jmp_buf JmpBuf;
14628 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14629 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14630 if ((rcStrict = setjmp(JmpBuf)) == 0)
14631 {
14632 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14633 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14634 }
14635 else
14636 pVCpu->iem.s.cLongJumps++;
14637 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14638#else
14639 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14640 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14641#endif
14642 if (rcStrict == VINF_SUCCESS)
14643 pVCpu->iem.s.cInstructions++;
14644 if (pVCpu->iem.s.cActiveMappings > 0)
14645 {
14646 Assert(rcStrict != VINF_SUCCESS);
14647 iemMemRollback(pVCpu);
14648 }
14649//#ifdef DEBUG
14650// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14651//#endif
14652
14653 /* Execute the next instruction as well if a cli, pop ss or
14654 mov ss, Gr has just completed successfully. */
14655 if ( fExecuteInhibit
14656 && rcStrict == VINF_SUCCESS
14657 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14658 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
14659 {
14660 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14661 if (rcStrict == VINF_SUCCESS)
14662 {
14663#ifdef LOG_ENABLED
14664 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14665#endif
14666#ifdef IEM_WITH_SETJMP
14667 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14668 if ((rcStrict = setjmp(JmpBuf)) == 0)
14669 {
14670 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14671 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14672 }
14673 else
14674 pVCpu->iem.s.cLongJumps++;
14675 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14676#else
14677 IEM_OPCODE_GET_NEXT_U8(&b);
14678 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14679#endif
14680 if (rcStrict == VINF_SUCCESS)
14681 pVCpu->iem.s.cInstructions++;
14682 if (pVCpu->iem.s.cActiveMappings > 0)
14683 {
14684 Assert(rcStrict != VINF_SUCCESS);
14685 iemMemRollback(pVCpu);
14686 }
14687 }
14688 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14689 }
14690
14691 /*
14692 * Return value fiddling, statistics and sanity assertions.
14693 */
14694 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14695
14696 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14697 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14698#if defined(IEM_VERIFICATION_MODE_FULL)
14699 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14700 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14701 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14702 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14703#endif
14704 return rcStrict;
14705}
14706
14707
14708#ifdef IN_RC
14709/**
14710 * Re-enters raw-mode or ensures we return to ring-3.
14711 *
14712 * @returns rcStrict, maybe modified.
14713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14714 * @param pCtx The current CPU context.
14715 * @param rcStrict The status code returned by the interpreter.
14716 */
14717DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
14718{
14719 if ( !pVCpu->iem.s.fInPatchCode
14720 && ( rcStrict == VINF_SUCCESS
14721 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14722 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14723 {
14724 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14725 CPUMRawEnter(pVCpu);
14726 else
14727 {
14728 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14729 rcStrict = VINF_EM_RESCHEDULE;
14730 }
14731 }
14732 return rcStrict;
14733}
14734#endif
14735
14736
14737/**
14738 * Execute one instruction.
14739 *
14740 * @return Strict VBox status code.
14741 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14742 */
14743VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14744{
14745#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14746 if (++pVCpu->iem.s.cVerifyDepth == 1)
14747 iemExecVerificationModeSetup(pVCpu);
14748#endif
14749#ifdef LOG_ENABLED
14750 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14751 iemLogCurInstr(pVCpu, pCtx, true);
14752#endif
14753
14754 /*
14755 * Do the decoding and emulation.
14756 */
14757 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14758 if (rcStrict == VINF_SUCCESS)
14759 rcStrict = iemExecOneInner(pVCpu, true);
14760
14761#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14762 /*
14763 * Assert some sanity.
14764 */
14765 if (pVCpu->iem.s.cVerifyDepth == 1)
14766 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14767 pVCpu->iem.s.cVerifyDepth--;
14768#endif
14769#ifdef IN_RC
14770 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14771#endif
14772 if (rcStrict != VINF_SUCCESS)
14773 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14774 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14775 return rcStrict;
14776}
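
/*
 * Minimal usage sketch (not from this file): a ring-3 caller such as EM would
 * typically interpret the current instruction with
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 * and then dispatch on the returned strict status code (e.g. pass the
 * VINF_IOM_R3_* statuses on to the ring-3 I/O handlers).
 */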
14777
14778
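/**
 * Executes one instruction, like IEMExecOne, but also reports the number of
 * bytes written by the instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pCtxCore    The context core structure.
 * @param   pcbWritten  Where to return the number of bytes written. Optional.
 */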
14779VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14780{
14781 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14782 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14783
14784 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14785 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14786 if (rcStrict == VINF_SUCCESS)
14787 {
14788 rcStrict = iemExecOneInner(pVCpu, true);
14789 if (pcbWritten)
14790 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14791 }
14792
14793#ifdef IN_RC
14794 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14795#endif
14796 return rcStrict;
14797}
14798
14799
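/**
 * Executes one instruction, like IEMExecOne, but can use caller supplied,
 * prefetched opcode bytes when pCtxCore->rip matches OpcodeBytesPC, thus
 * skipping the opcode prefetch from guest memory.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   pCtxCore        The context core structure.
 * @param   OpcodeBytesPC   The PC of the opcode bytes.
 * @param   pvOpcodeBytes   Prefetched opcode bytes.
 * @param   cbOpcodeBytes   Number of prefetched bytes.
 */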
14800VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14801 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14802{
14803 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14804 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14805
14806 VBOXSTRICTRC rcStrict;
14807 if ( cbOpcodeBytes
14808 && pCtx->rip == OpcodeBytesPC)
14809 {
14810 iemInitDecoder(pVCpu, false);
14811#ifdef IEM_WITH_CODE_TLB
14812 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14813 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14814 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14815 pVCpu->iem.s.offCurInstrStart = 0;
14816 pVCpu->iem.s.offInstrNextByte = 0;
14817#else
14818 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14819 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14820#endif
14821 rcStrict = VINF_SUCCESS;
14822 }
14823 else
14824 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14825 if (rcStrict == VINF_SUCCESS)
14826 {
14827 rcStrict = iemExecOneInner(pVCpu, true);
14828 }
14829
14830#ifdef IN_RC
14831 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14832#endif
14833 return rcStrict;
14834}
14835
14836
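/**
 * Like IEMExecOneEx, but the decoder is set up to bypass access handlers and
 * the instruction following CLI / POP SS / MOV SS is not executed.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pCtxCore    The context core structure.
 * @param   pcbWritten  Where to return the number of bytes written. Optional.
 */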
14837VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14838{
14839 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14840 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14841
14842 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14843 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14844 if (rcStrict == VINF_SUCCESS)
14845 {
14846 rcStrict = iemExecOneInner(pVCpu, false);
14847 if (pcbWritten)
14848 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14849 }
14850
14851#ifdef IN_RC
14852 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14853#endif
14854 return rcStrict;
14855}
14856
14857
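/**
 * Combination of IEMExecOneBypassEx and IEMExecOneWithPrefetchedByPC: bypasses
 * access handlers and can use caller supplied, prefetched opcode bytes.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   pCtxCore        The context core structure.
 * @param   OpcodeBytesPC   The PC of the opcode bytes.
 * @param   pvOpcodeBytes   Prefetched opcode bytes.
 * @param   cbOpcodeBytes   Number of prefetched bytes.
 */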
14858VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14859 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14860{
14861 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14862 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14863
14864 VBOXSTRICTRC rcStrict;
14865 if ( cbOpcodeBytes
14866 && pCtx->rip == OpcodeBytesPC)
14867 {
14868 iemInitDecoder(pVCpu, true);
14869#ifdef IEM_WITH_CODE_TLB
14870 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14871 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14872 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14873 pVCpu->iem.s.offCurInstrStart = 0;
14874 pVCpu->iem.s.offInstrNextByte = 0;
14875#else
14876 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14877 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14878#endif
14879 rcStrict = VINF_SUCCESS;
14880 }
14881 else
14882 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14883 if (rcStrict == VINF_SUCCESS)
14884 rcStrict = iemExecOneInner(pVCpu, false);
14885
14886#ifdef IN_RC
14887 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14888#endif
14889 return rcStrict;
14890}
14891
14892
14893/**
14894 * May come in handy when debugging DISGetParamSize.
14895 *
14896 * @returns Strict VBox status code.
14897 * @param pVCpu The cross context virtual CPU structure of the
14898 * calling EMT.
14899 * @param pCtxCore The context core structure.
14900 * @param OpcodeBytesPC The PC of the opcode bytes.
14901 * @param pvOpcodeBytes Prefetched opcode bytes.
14902 * @param cbOpcodeBytes Number of prefetched bytes.
14903 * @param pcbWritten Where to return the number of bytes written.
14904 * Optional.
14905 */
14906VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14907 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14908 uint32_t *pcbWritten)
14909{
14910 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14911 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14912
14913 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14914 VBOXSTRICTRC rcStrict;
14915 if ( cbOpcodeBytes
14916 && pCtx->rip == OpcodeBytesPC)
14917 {
14918 iemInitDecoder(pVCpu, true);
14919#ifdef IEM_WITH_CODE_TLB
14920 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14921 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14922 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14923 pVCpu->iem.s.offCurInstrStart = 0;
14924 pVCpu->iem.s.offInstrNextByte = 0;
14925#else
14926 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14927 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14928#endif
14929 rcStrict = VINF_SUCCESS;
14930 }
14931 else
14932 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14933 if (rcStrict == VINF_SUCCESS)
14934 {
14935 rcStrict = iemExecOneInner(pVCpu, false);
14936 if (pcbWritten)
14937 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14938 }
14939
14940#ifdef IN_RC
14941 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14942#endif
14943 return rcStrict;
14944}
14945
14946
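/**
 * Executes instructions in a loop until a forced action is pending, a status
 * other than VINF_SUCCESS is returned, or an internal limit (currently 4096
 * instructions per call) is reached.  In full verification builds only a
 * single instruction is executed per call.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   pcInstructions  Where to return the number of instructions executed. Optional.
 */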
14947VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14948{
14949 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14950
14951#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14952 /*
14953 * See if there is an interrupt pending in TRPM, inject it if we can.
14954 */
14955 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14956# ifdef IEM_VERIFICATION_MODE_FULL
14957 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14958# endif
14959 if ( pCtx->eflags.Bits.u1IF
14960 && TRPMHasTrap(pVCpu)
14961 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14962 {
14963 uint8_t u8TrapNo;
14964 TRPMEVENT enmType;
14965 RTGCUINT uErrCode;
14966 RTGCPTR uCr2;
14967 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14968 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14969 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14970 TRPMResetTrap(pVCpu);
14971 }
14972
14973 /*
14974 * Log the state.
14975 */
14976# ifdef LOG_ENABLED
14977 iemLogCurInstr(pVCpu, pCtx, true);
14978# endif
14979
14980 /*
14981 * Do the decoding and emulation.
14982 */
14983 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14984 if (rcStrict == VINF_SUCCESS)
14985 rcStrict = iemExecOneInner(pVCpu, true);
14986
14987 /*
14988 * Assert some sanity.
14989 */
14990 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14991
14992 /*
14993 * Log and return.
14994 */
14995 if (rcStrict != VINF_SUCCESS)
14996 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14997 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14998 if (pcInstructions)
14999 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15000 return rcStrict;
15001
15002#else /* Not verification mode */
15003
15004 /*
15005 * See if there is an interrupt pending in TRPM, inject it if we can.
15006 */
15007 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15008# ifdef IEM_VERIFICATION_MODE_FULL
15009 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15010# endif
15011 if ( pCtx->eflags.Bits.u1IF
15012 && TRPMHasTrap(pVCpu)
15013 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15014 {
15015 uint8_t u8TrapNo;
15016 TRPMEVENT enmType;
15017 RTGCUINT uErrCode;
15018 RTGCPTR uCr2;
15019 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15020 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15021 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15022 TRPMResetTrap(pVCpu);
15023 }
15024
15025 /*
15026 * Initial decoder init w/ prefetch, then setup setjmp.
15027 */
15028 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15029 if (rcStrict == VINF_SUCCESS)
15030 {
15031# ifdef IEM_WITH_SETJMP
15032 jmp_buf JmpBuf;
15033 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
15034 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15035 pVCpu->iem.s.cActiveMappings = 0;
15036 if ((rcStrict = setjmp(JmpBuf)) == 0)
15037# endif
15038 {
15039 /*
15040 * The run loop. We limit ourselves to 4096 instructions right now.
15041 */
15042 PVM pVM = pVCpu->CTX_SUFF(pVM);
15043 uint32_t cInstr = 4096;
15044 for (;;)
15045 {
15046 /*
15047 * Log the state.
15048 */
15049# ifdef LOG_ENABLED
15050 iemLogCurInstr(pVCpu, pCtx, true);
15051# endif
15052
15053 /*
15054 * Do the decoding and emulation.
15055 */
15056 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15057 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15058 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15059 {
15060 Assert(pVCpu->iem.s.cActiveMappings == 0);
15061 pVCpu->iem.s.cInstructions++;
15062 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
15063 {
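/* Keep looping only while no force flag needs servicing; pending PIC/APIC
   interrupts are ignored here while EFLAGS.IF is clear, as they cannot be
   delivered anyway. */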
15064 uint32_t fCpu = pVCpu->fLocalForcedActions
15065 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
15066 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
15067 | VMCPU_FF_TLB_FLUSH
15068# ifdef VBOX_WITH_RAW_MODE
15069 | VMCPU_FF_TRPM_SYNC_IDT
15070 | VMCPU_FF_SELM_SYNC_TSS
15071 | VMCPU_FF_SELM_SYNC_GDT
15072 | VMCPU_FF_SELM_SYNC_LDT
15073# endif
15074 | VMCPU_FF_INHIBIT_INTERRUPTS
15075 | VMCPU_FF_BLOCK_NMIS
15076 | VMCPU_FF_UNHALT ));
15077
15078 if (RT_LIKELY( ( !fCpu
15079 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
15080 && !pCtx->rflags.Bits.u1IF) )
15081 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
15082 {
15083 if (cInstr-- > 0)
15084 {
15085 Assert(pVCpu->iem.s.cActiveMappings == 0);
15086 iemReInitDecoder(pVCpu);
15087 continue;
15088 }
15089 }
15090 }
15091 Assert(pVCpu->iem.s.cActiveMappings == 0);
15092 }
15093 else if (pVCpu->iem.s.cActiveMappings > 0)
15094 iemMemRollback(pVCpu);
15095 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15096 break;
15097 }
15098 }
15099# ifdef IEM_WITH_SETJMP
15100 else
15101 {
15102 if (pVCpu->iem.s.cActiveMappings > 0)
15103 iemMemRollback(pVCpu);
15104 pVCpu->iem.s.cLongJumps++;
15105 }
15106 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15107# endif
15108
15109 /*
15110 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15111 */
15112 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15113 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15114# if defined(IEM_VERIFICATION_MODE_FULL)
15115 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15116 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15117 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15118 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15119# endif
15120 }
15121
15122 /*
15123 * Maybe re-enter raw-mode and log.
15124 */
15125# ifdef IN_RC
15126 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15127# endif
15128 if (rcStrict != VINF_SUCCESS)
15129 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15130 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15131 if (pcInstructions)
15132 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15133 return rcStrict;
15134#endif /* Not verification mode */
15135}
15136
15137
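/*
 * A minimal caller sketch (hypothetical helper, not the actual EM loop): drive
 * IEM via IEMExecLots until it returns something other than VINF_SUCCESS that
 * the caller has to handle (I/O, MMIO, scheduling codes, errors).
 */
#if 0
static VBOXSTRICTRC iemSampleExecLotsLoop(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict;
    do
    {
        uint32_t cInstructions = 0;
        rcStrict = IEMExecLots(pVCpu, &cInstructions);
        LogFlow(("iemSampleExecLotsLoop: %u instructions, rcStrict=%Rrc\n",
                 cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    } while (rcStrict == VINF_SUCCESS);
    return rcStrict;
}
#endif
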
15138
15139/**
15140 * Injects a trap, fault, abort, software interrupt or external interrupt.
15141 *
15142 * The parameter list matches TRPMQueryTrapAll pretty closely.
15143 *
15144 * @returns Strict VBox status code.
15145 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15146 * @param u8TrapNo The trap number.
15147 * @param enmType The event type: trap/fault/abort (CPU exception),
15148 * software interrupt or hardware (external) interrupt.
15149 * @param uErrCode The error code if applicable.
15150 * @param uCr2 The CR2 value if applicable.
15151 * @param cbInstr The instruction length (only relevant for
15152 * software interrupts).
15153 */
15154VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
15155 uint8_t cbInstr)
15156{
15157 iemInitDecoder(pVCpu, false);
15158#ifdef DBGFTRACE_ENABLED
15159 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
15160 u8TrapNo, enmType, uErrCode, uCr2);
15161#endif
15162
15163 uint32_t fFlags;
15164 switch (enmType)
15165 {
15166 case TRPM_HARDWARE_INT:
15167 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
15168 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
15169 uErrCode = uCr2 = 0;
15170 break;
15171
15172 case TRPM_SOFTWARE_INT:
15173 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
15174 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
15175 uErrCode = uCr2 = 0;
15176 break;
15177
15178 case TRPM_TRAP:
15179 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
15180 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
15181 if (u8TrapNo == X86_XCPT_PF)
15182 fFlags |= IEM_XCPT_FLAGS_CR2;
15183 switch (u8TrapNo)
15184 {
15185 case X86_XCPT_DF:
15186 case X86_XCPT_TS:
15187 case X86_XCPT_NP:
15188 case X86_XCPT_SS:
15189 case X86_XCPT_PF:
15190 case X86_XCPT_AC:
15191 fFlags |= IEM_XCPT_FLAGS_ERR;
15192 break;
15193
15194 case X86_XCPT_NMI:
15195 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
15196 break;
15197 }
15198 break;
15199
15200 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15201 }
15202
15203 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15204}
15205
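/*
 * Example sketch with hypothetical values: reflecting a guest page fault
 * straight into the guest via IEM. The helper name, the error code and the
 * fault address are illustrative only.
 */
#if 0
static VBOXSTRICTRC iemSampleInjectPageFault(PVMCPU pVCpu, RTGCPTR GCPtrFaultAddress)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP,
                         0x02 /* error code: write access (illustrative) */,
                         GCPtrFaultAddress /* CR2 */,
                         0 /* cbInstr - only relevant for software interrupts */);
}
#endif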
15206
15207/**
15208 * Injects the active TRPM event.
15209 *
15210 * @returns Strict VBox status code.
15211 * @param pVCpu The cross context virtual CPU structure.
15212 */
15213VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
15214{
15215#ifndef IEM_IMPLEMENTS_TASKSWITCH
15216 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15217#else
15218 uint8_t u8TrapNo;
15219 TRPMEVENT enmType;
15220 RTGCUINT uErrCode;
15221 RTGCUINTPTR uCr2;
15222 uint8_t cbInstr;
15223 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
15224 if (RT_FAILURE(rc))
15225 return rc;
15226
15227 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15228
15229 /** @todo Are there any other codes that imply the event was successfully
15230 * delivered to the guest? See @bugref{6607}. */
15231 if ( rcStrict == VINF_SUCCESS
15232 || rcStrict == VINF_IEM_RAISED_XCPT)
15233 {
15234 TRPMResetTrap(pVCpu);
15235 }
15236 return rcStrict;
15237#endif
15238}
15239
15240
15241VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15242{
15243 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15244 return VERR_NOT_IMPLEMENTED;
15245}
15246
15247
15248VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15249{
15250 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15251 return VERR_NOT_IMPLEMENTED;
15252}
15253
15254
15255#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15256/**
15257 * Executes an IRET instruction with the default operand size.
15258 *
15259 * This is for PATM.
15260 *
15261 * @returns VBox status code.
15262 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15263 * @param pCtxCore The register frame.
15264 */
15265VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
15266{
15267 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15268
15269 iemCtxCoreToCtx(pCtx, pCtxCore);
15270 iemInitDecoder(pVCpu);
15271 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15272 if (rcStrict == VINF_SUCCESS)
15273 iemCtxToCtxCore(pCtxCore, pCtx);
15274 else
15275 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15276 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15277 return rcStrict;
15278}
15279#endif
15280
15281
15282/**
15283 * Macro used by the IEMExec* methods to check the given instruction length.
15284 *
15285 * Will return on failure!
15286 *
15287 * @param a_cbInstr The given instruction length.
15288 * @param a_cbMin The minimum length.
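 * @remarks The unsigned subtraction folds the a_cbMin <= a_cbInstr <= 15 range
 *          check into a single compare (values below a_cbMin wrap around).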
15289 */
15290#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15291 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15292 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
15293
15294
15295/**
15296 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15297 *
15298 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15299 *
15300 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
15301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15302 * @param rcStrict The status code to fiddle.
15303 */
15304DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15305{
15306 iemUninitExec(pVCpu);
15307#ifdef IN_RC
15308 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15309 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15310#else
15311 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15312#endif
15313}
15314
15315
15316/**
15317 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15318 *
15319 * This API ASSUMES that the caller has already verified that the guest code is
15320 * allowed to access the I/O port. (The I/O port is in the DX register in the
15321 * guest state.)
15322 *
15323 * @returns Strict VBox status code.
15324 * @param pVCpu The cross context virtual CPU structure.
15325 * @param cbValue The size of the I/O port access (1, 2, or 4).
15326 * @param enmAddrMode The addressing mode.
15327 * @param fRepPrefix Indicates whether a repeat prefix is used
15328 * (doesn't matter which for this instruction).
15329 * @param cbInstr The instruction length in bytes.
15330 * @param iEffSeg The effective segment register index (X86_SREG_XXX).
15331 * @param fIoChecked Whether the access to the I/O port has been
15332 * checked or not. It's typically checked in the
15333 * HM scenario.
15334 */
15335VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15336 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15337{
15338 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15339 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15340
15341 /*
15342 * State init.
15343 */
15344 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15345
15346 /*
15347 * Switch orgy for getting to the right handler.
15348 */
15349 VBOXSTRICTRC rcStrict;
15350 if (fRepPrefix)
15351 {
15352 switch (enmAddrMode)
15353 {
15354 case IEMMODE_16BIT:
15355 switch (cbValue)
15356 {
15357 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15358 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15359 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15360 default:
15361 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15362 }
15363 break;
15364
15365 case IEMMODE_32BIT:
15366 switch (cbValue)
15367 {
15368 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15369 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15370 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15371 default:
15372 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15373 }
15374 break;
15375
15376 case IEMMODE_64BIT:
15377 switch (cbValue)
15378 {
15379 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15380 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15381 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15382 default:
15383 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15384 }
15385 break;
15386
15387 default:
15388 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15389 }
15390 }
15391 else
15392 {
15393 switch (enmAddrMode)
15394 {
15395 case IEMMODE_16BIT:
15396 switch (cbValue)
15397 {
15398 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15399 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15400 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15401 default:
15402 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15403 }
15404 break;
15405
15406 case IEMMODE_32BIT:
15407 switch (cbValue)
15408 {
15409 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15410 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15411 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15412 default:
15413 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15414 }
15415 break;
15416
15417 case IEMMODE_64BIT:
15418 switch (cbValue)
15419 {
15420 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15421 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15422 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15423 default:
15424 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15425 }
15426 break;
15427
15428 default:
15429 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15430 }
15431 }
15432
15433 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15434}
15435
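/*
 * Example sketch with hypothetical exit data: emulating a trapped 'rep outsb'
 * (F3 6E, 2 bytes) in a 32-bit guest, DS-relative, with the I/O port access
 * not yet checked. The helper name is illustrative only.
 */
#if 0
static VBOXSTRICTRC iemSampleStringIoWrite(PVMCPU pVCpu)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                2 /*cbInstr: rep outsb*/, X86_SREG_DS, false /*fIoChecked*/);
}
#endif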
15436
15437/**
15438 * Interface for HM and EM for executing string I/O IN (read) instructions.
15439 *
15440 * This API ASSUMES that the caller has already verified that the guest code is
15441 * allowed to access the I/O port. (The I/O port is in the DX register in the
15442 * guest state.)
15443 *
15444 * @returns Strict VBox status code.
15445 * @param pVCpu The cross context virtual CPU structure.
15446 * @param cbValue The size of the I/O port access (1, 2, or 4).
15447 * @param enmAddrMode The addressing mode.
15448 * @param fRepPrefix Indicates whether a repeat prefix is used
15449 * (doesn't matter which for this instruction).
15450 * @param cbInstr The instruction length in bytes.
15451 * @param fIoChecked Whether the access to the I/O port has been
15452 * checked or not. It's typically checked in the
15453 * HM scenario.
15454 */
15455VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15456 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15457{
15458 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15459
15460 /*
15461 * State init.
15462 */
15463 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15464
15465 /*
15466 * Switch orgy for getting to the right handler.
15467 */
15468 VBOXSTRICTRC rcStrict;
15469 if (fRepPrefix)
15470 {
15471 switch (enmAddrMode)
15472 {
15473 case IEMMODE_16BIT:
15474 switch (cbValue)
15475 {
15476 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15477 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15478 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15479 default:
15480 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15481 }
15482 break;
15483
15484 case IEMMODE_32BIT:
15485 switch (cbValue)
15486 {
15487 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15488 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15489 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15490 default:
15491 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15492 }
15493 break;
15494
15495 case IEMMODE_64BIT:
15496 switch (cbValue)
15497 {
15498 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15499 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15500 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15501 default:
15502 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15503 }
15504 break;
15505
15506 default:
15507 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15508 }
15509 }
15510 else
15511 {
15512 switch (enmAddrMode)
15513 {
15514 case IEMMODE_16BIT:
15515 switch (cbValue)
15516 {
15517 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15518 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15519 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15520 default:
15521 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15522 }
15523 break;
15524
15525 case IEMMODE_32BIT:
15526 switch (cbValue)
15527 {
15528 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15529 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15530 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15531 default:
15532 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15533 }
15534 break;
15535
15536 case IEMMODE_64BIT:
15537 switch (cbValue)
15538 {
15539 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15540 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15541 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15542 default:
15543 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15544 }
15545 break;
15546
15547 default:
15548 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15549 }
15550 }
15551
15552 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15553}
15554
15555
15556/**
15557 * Interface for raw-mode to execute an OUT instruction.
15558 *
15559 * @returns Strict VBox status code.
15560 * @param pVCpu The cross context virtual CPU structure.
15561 * @param cbInstr The instruction length in bytes.
15562 * @param u16Port The port to write to.
15563 * @param cbReg The register size.
15564 *
15565 * @remarks In ring-0 not all of the state needs to be synced in.
15566 */
15567VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15568{
15569 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15570 Assert(cbReg <= 4 && cbReg != 3);
15571
15572 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15573 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15574 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15575}
15576
15577
15578/**
15579 * Interface for raw-mode to execute an IN instruction.
15580 *
15581 * @returns Strict VBox status code.
15582 * @param pVCpu The cross context virtual CPU structure.
15583 * @param cbInstr The instruction length in bytes.
15584 * @param u16Port The port to read.
15585 * @param cbReg The register size.
15586 */
15587VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15588{
15589 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15590 Assert(cbReg <= 4 && cbReg != 3);
15591
15592 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15593 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15594 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15595}
15596
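/*
 * Example sketch with hypothetical exit data: a trapped single-byte
 * 'out dx, al' (opcode EE, 1 byte) followed by the matching 'in al, dx' (EC).
 * The helper name and the port value are illustrative only.
 */
#if 0
static VBOXSTRICTRC iemSamplePortIo(PVMCPU pVCpu)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, 0x80 /*u16Port*/, 1 /*cbReg*/);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = IEMExecDecodedIn(pVCpu, 1 /*cbInstr*/, 0x80 /*u16Port*/, 1 /*cbReg*/);
    return rcStrict;
}
#endif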
15597
15598/**
15599 * Interface for HM and EM to write to a CRx register.
15600 *
15601 * @returns Strict VBox status code.
15602 * @param pVCpu The cross context virtual CPU structure.
15603 * @param cbInstr The instruction length in bytes.
15604 * @param iCrReg The control register number (destination).
15605 * @param iGReg The general purpose register number (source).
15606 *
15607 * @remarks In ring-0 not all of the state needs to be synced in.
15608 */
15609VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15610{
15611 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15612 Assert(iCrReg < 16);
15613 Assert(iGReg < 16);
15614
15615 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15616 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15617 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15618}
15619
15620
15621/**
15622 * Interface for HM and EM to read from a CRx register.
15623 *
15624 * @returns Strict VBox status code.
15625 * @param pVCpu The cross context virtual CPU structure.
15626 * @param cbInstr The instruction length in bytes.
15627 * @param iGReg The general purpose register number (destination).
15628 * @param iCrReg The control register number (source).
15629 *
15630 * @remarks In ring-0 not all of the state needs to be synced in.
15631 */
15632VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15633{
15634 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15635 Assert(iCrReg < 16);
15636 Assert(iGReg < 16);
15637
15638 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15639 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15640 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15641}
15642
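/*
 * Example sketch with hypothetical exit data: a trapped 'mov cr3, rax'
 * (0F 22 D8, 3 bytes) handed to IEM for emulation. The helper name is
 * illustrative only.
 */
#if 0
static VBOXSTRICTRC iemSampleMovToCr3(PVMCPU pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg: CR3*/, X86_GREG_xAX /*iGReg*/);
}
#endif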
15643
15644/**
15645 * Interface for HM and EM to clear the CR0[TS] bit.
15646 *
15647 * @returns Strict VBox status code.
15648 * @param pVCpu The cross context virtual CPU structure.
15649 * @param cbInstr The instruction length in bytes.
15650 *
15651 * @remarks In ring-0 not all of the state needs to be synced in.
15652 */
15653VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15654{
15655 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15656
15657 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15658 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15659 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15660}
15661
15662
15663/**
15664 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15665 *
15666 * @returns Strict VBox status code.
15667 * @param pVCpu The cross context virtual CPU structure.
15668 * @param cbInstr The instruction length in bytes.
15669 * @param uValue The machine status word to load into CR0.
15670 *
15671 * @remarks In ring-0 not all of the state needs to be synced in.
15672 */
15673VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
15674{
15675 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15676
15677 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15678 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
15679 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15680}
15681
15682
15683/**
15684 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15685 *
15686 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15687 *
15688 * @returns Strict VBox status code.
15689 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15690 * @param cbInstr The instruction length in bytes.
15691 * @remarks In ring-0 not all of the state needs to be synced in.
15692 * @thread EMT(pVCpu)
15693 */
15694VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15695{
15696 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15697
15698 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15699 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15700 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15701}
15702
15703
15704/**
15705 * Checks if IEM is in the process of delivering an event (interrupt or
15706 * exception).
15707 *
15708 * @returns true if we're in the process of raising an interrupt or exception,
15709 * false otherwise.
15710 * @param pVCpu The cross context virtual CPU structure.
15711 * @param puVector Where to store the vector associated with the
15712 * currently delivered event, optional.
15713 * @param pfFlags Where to store the event delivery flags (see
15714 * IEM_XCPT_FLAGS_XXX), optional.
15715 * @param puErr Where to store the error code associated with the
15716 * event, optional.
15717 * @param puCr2 Where to store the CR2 associated with the event,
15718 * optional.
15719 * @remarks The caller should check the flags to determine if the error code and
15720 * CR2 are valid for the event.
15721 */
15722VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15723{
15724 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15725 if (fRaisingXcpt)
15726 {
15727 if (puVector)
15728 *puVector = pVCpu->iem.s.uCurXcpt;
15729 if (pfFlags)
15730 *pfFlags = pVCpu->iem.s.fCurXcpt;
15731 if (puErr)
15732 *puErr = pVCpu->iem.s.uCurXcptErr;
15733 if (puCr2)
15734 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15735 }
15736 return fRaisingXcpt;
15737}
15738
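/*
 * Example sketch (hypothetical caller): query whether IEM is busy delivering
 * an event before deciding to re-inject something else, and log what it is.
 */
#if 0
static void iemSampleLogCurrentXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Delivering vector %#x (flags=%#x err=%#x cr2=%#RX64)\n", uVector, fFlags, uErr, uCr2));
}
#endif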
15739
15740#ifdef VBOX_WITH_NESTED_HWVIRT
15741/**
15742 * Interface for HM and EM to emulate the CLGI instruction.
15743 *
15744 * @returns Strict VBox status code.
15745 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15746 * @param cbInstr The instruction length in bytes.
15747 * @thread EMT(pVCpu)
15748 */
15749VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15750{
15751 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15752
15753 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15754 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15755 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15756}
15757
15758
15759/**
15760 * Interface for HM and EM to emulate the STGI instruction.
15761 *
15762 * @returns Strict VBox status code.
15763 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15764 * @param cbInstr The instruction length in bytes.
15765 * @thread EMT(pVCpu)
15766 */
15767VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15768{
15769 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15770
15771 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15772 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15773 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15774}
15775
15776
15777/**
15778 * Interface for HM and EM to emulate the VMLOAD instruction.
15779 *
15780 * @returns Strict VBox status code.
15781 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15782 * @param cbInstr The instruction length in bytes.
15783 * @thread EMT(pVCpu)
15784 */
15785VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15786{
15787 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15788
15789 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15790 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15791 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15792}
15793
15794
15795/**
15796 * Interface for HM and EM to emulate the VMSAVE instruction.
15797 *
15798 * @returns Strict VBox status code.
15799 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15800 * @param cbInstr The instruction length in bytes.
15801 * @thread EMT(pVCpu)
15802 */
15803VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15804{
15805 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15806
15807 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15808 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15809 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15810}
15811
15812
15813/**
15814 * Interface for HM and EM to emulate the INVLPGA instruction.
15815 *
15816 * @returns Strict VBox status code.
15817 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15818 * @param cbInstr The instruction length in bytes.
15819 * @thread EMT(pVCpu)
15820 */
15821VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15822{
15823 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15824
15825 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15826 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15827 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15828}
15829#endif /* VBOX_WITH_NESTED_HWVIRT */
15830
15831#ifdef IN_RING3
15832
15833/**
15834 * Handles the unlikely and probably fatal merge cases.
15835 *
15836 * @returns Merged status code.
15837 * @param rcStrict Current EM status code.
15838 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15839 * with @a rcStrict.
15840 * @param iMemMap The memory mapping index. For error reporting only.
15841 * @param pVCpu The cross context virtual CPU structure of the calling
15842 * thread, for error reporting only.
15843 */
15844DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15845 unsigned iMemMap, PVMCPU pVCpu)
15846{
15847 if (RT_FAILURE_NP(rcStrict))
15848 return rcStrict;
15849
15850 if (RT_FAILURE_NP(rcStrictCommit))
15851 return rcStrictCommit;
15852
15853 if (rcStrict == rcStrictCommit)
15854 return rcStrictCommit;
15855
15856 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15857 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15858 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15859 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15860 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15861 return VERR_IOM_FF_STATUS_IPE;
15862}
15863
15864
15865/**
15866 * Helper for IOMR3ProcessForceFlag.
15867 *
15868 * @returns Merged status code.
15869 * @param rcStrict Current EM status code.
15870 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15871 * with @a rcStrict.
15872 * @param iMemMap The memory mapping index. For error reporting only.
15873 * @param pVCpu The cross context virtual CPU structure of the calling
15874 * thread, for error reporting only.
15875 */
15876DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15877{
15878 /* Simple. */
15879 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15880 return rcStrictCommit;
15881
15882 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15883 return rcStrict;
15884
15885 /* EM scheduling status codes. */
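    /* (Lower EM status values have higher priority, so the smaller of the two is kept.) */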
15886 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15887 && rcStrict <= VINF_EM_LAST))
15888 {
15889 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15890 && rcStrictCommit <= VINF_EM_LAST))
15891 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15892 }
15893
15894 /* Unlikely */
15895 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15896}
15897
15898
15899/**
15900 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15901 *
15902 * @returns Merge between @a rcStrict and what the commit operation returned.
15903 * @param pVM The cross context VM structure.
15904 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15905 * @param rcStrict The status code returned by ring-0 or raw-mode.
15906 */
15907VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15908{
15909 /*
15910 * Reset the pending commit.
15911 */
15912 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15913 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15914 ("%#x %#x %#x\n",
15915 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15916 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15917
15918 /*
15919 * Commit the pending bounce buffers (usually just one).
15920 */
15921 unsigned cBufs = 0;
15922 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15923 while (iMemMap-- > 0)
15924 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15925 {
15926 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15927 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15928 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15929
15930 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15931 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15932 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15933
15934 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15935 {
15936 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15937 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15938 pbBuf,
15939 cbFirst,
15940 PGMACCESSORIGIN_IEM);
15941 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
15942 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
15943 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
15944 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
15945 }
15946
15947 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
15948 {
15949 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
15950 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
15951 pbBuf + cbFirst,
15952 cbSecond,
15953 PGMACCESSORIGIN_IEM);
15954 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
15955 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
15956 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
15957 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
15958 }
15959 cBufs++;
15960 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
15961 }
15962
15963 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
15964 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
15965 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15966 pVCpu->iem.s.cActiveMappings = 0;
15967 return rcStrict;
15968}
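
/*
 * Hypothetical usage sketch: ring-3 force-flag processing would invoke the
 * above roughly like this when VMCPU_FF_IEM is pending. The helper name is
 * illustrative only.
 */
#if 0
static VBOXSTRICTRC iemSampleProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif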
15969
15970#endif /* IN_RING3 */
15971